#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name, errors.ECODE_INVAL)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer need to worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which will then be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level.

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes they need to calculate some locks only after the ones at lower
    levels have been acquired. This function is called just before acquiring
    locks at a particular level, but after acquiring the ones at lower levels,
    and permits such calculations. It can be used to modify self.needed_locks,
    and by default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
210 a8083063 Iustin Pop
    """Check prerequisites for this LU.
211 a8083063 Iustin Pop

212 a8083063 Iustin Pop
    This method should check that the prerequisites for the execution
213 a8083063 Iustin Pop
    of this LU are fulfilled. It can do internode communication, but
214 a8083063 Iustin Pop
    it should be idempotent - no cluster or system changes are
215 a8083063 Iustin Pop
    allowed.
216 a8083063 Iustin Pop

217 a8083063 Iustin Pop
    The method should raise errors.OpPrereqError in case something is
218 a8083063 Iustin Pop
    not fulfilled. Its return value is ignored.
219 a8083063 Iustin Pop

220 a8083063 Iustin Pop
    This method should also update all the parameters of the opcode to
221 d465bdc8 Guido Trotter
    their canonical form if it hasn't been done by ExpandNames before.
222 a8083063 Iustin Pop

223 a8083063 Iustin Pop
    """
224 3a012b41 Michael Hanselmann
    if self.tasklets is not None:
225 b4a9eb66 Michael Hanselmann
      for (idx, tl) in enumerate(self.tasklets):
226 abae1b2b Michael Hanselmann
        logging.debug("Checking prerequisites for tasklet %s/%s",
227 abae1b2b Michael Hanselmann
                      idx + 1, len(self.tasklets))
228 6fd35c4d Michael Hanselmann
        tl.CheckPrereq()
229 6fd35c4d Michael Hanselmann
    else:
230 6fd35c4d Michael Hanselmann
      raise NotImplementedError
231 a8083063 Iustin Pop
232 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
233 a8083063 Iustin Pop
    """Execute the LU.
234 a8083063 Iustin Pop

235 a8083063 Iustin Pop
    This method should implement the actual work. It should raise
236 a8083063 Iustin Pop
    errors.OpExecError for failures that are somewhat dealt with in
237 a8083063 Iustin Pop
    code, or expected.
238 a8083063 Iustin Pop

239 a8083063 Iustin Pop
    """
240 3a012b41 Michael Hanselmann
    if self.tasklets is not None:
241 b4a9eb66 Michael Hanselmann
      for (idx, tl) in enumerate(self.tasklets):
242 abae1b2b Michael Hanselmann
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
243 6fd35c4d Michael Hanselmann
        tl.Exec(feedback_fn)
244 6fd35c4d Michael Hanselmann
    else:
245 6fd35c4d Michael Hanselmann
      raise NotImplementedError
246 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_' as this will
    be handled in the hooks runner. Also note that additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If no nodes are needed, an empty list (and not None) should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
270 1fce5219 Guido Trotter
    """Notify the LU about the results of its hooks.
271 1fce5219 Guido Trotter

272 1fce5219 Guido Trotter
    This method is called every time a hooks phase is executed, and notifies
273 1fce5219 Guido Trotter
    the Logical Unit about the hooks' result. The LU can then use it to alter
274 1fce5219 Guido Trotter
    its result based on the hooks.  By default the method does nothing and the
275 1fce5219 Guido Trotter
    previous result is passed back unchanged but any LU can define it if it
276 1fce5219 Guido Trotter
    wants to use the local cluster hook-scripts somehow.
277 1fce5219 Guido Trotter

278 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
279 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
280 e4376078 Iustin Pop
    @param hook_results: the results of the multi-node hooks rpc call
281 e4376078 Iustin Pop
    @param feedback_fn: function used send feedback back to the caller
282 e4376078 Iustin Pop
    @param lu_result: the previous Exec result this LU had, or None
283 e4376078 Iustin Pop
        in the PRE phase
284 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
285 e4376078 Iustin Pop
        and hook results
286 1fce5219 Guido Trotter

287 1fce5219 Guido Trotter
    """
288 2d54e29c Iustin Pop
    # API must be kept, thus we ignore the unused argument and could
289 2d54e29c Iustin Pop
    # be a function warnings
290 2d54e29c Iustin Pop
    # pylint: disable-msg=W0613,R0201
291 1fce5219 Guido Trotter
    return lu_result
292 1fce5219 Guido Trotter
  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

  def _LockInstancesNodes(self, primary_only=False):
313 c4a2fee1 Guido Trotter
    """Helper function to declare instances' nodes for locking.
314 c4a2fee1 Guido Trotter

315 c4a2fee1 Guido Trotter
    This function should be called after locking one or more instances to lock
316 c4a2fee1 Guido Trotter
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
317 c4a2fee1 Guido Trotter
    with all primary or secondary nodes for instances already locked and
318 c4a2fee1 Guido Trotter
    present in self.needed_locks[locking.LEVEL_INSTANCE].
319 c4a2fee1 Guido Trotter

320 c4a2fee1 Guido Trotter
    It should be called from DeclareLocks, and for safety only works if
321 c4a2fee1 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] is set.
322 c4a2fee1 Guido Trotter

323 c4a2fee1 Guido Trotter
    In the future it may grow parameters to just lock some instance's nodes, or
324 c4a2fee1 Guido Trotter
    to just lock primaries or secondary nodes, if needed.
325 c4a2fee1 Guido Trotter

326 e4376078 Iustin Pop
    If should be called in DeclareLocks in a way similar to::
327 c4a2fee1 Guido Trotter

328 e4376078 Iustin Pop
      if level == locking.LEVEL_NODE:
329 e4376078 Iustin Pop
        self._LockInstancesNodes()
330 c4a2fee1 Guido Trotter

331 a82ce292 Guido Trotter
    @type primary_only: boolean
332 a82ce292 Guido Trotter
    @param primary_only: only lock primary nodes of locked instances
333 a82ce292 Guido Trotter

334 c4a2fee1 Guido Trotter
    """
335 c4a2fee1 Guido Trotter
    assert locking.LEVEL_NODE in self.recalculate_locks, \
336 c4a2fee1 Guido Trotter
      "_LockInstancesNodes helper function called with no nodes to recalculate"
337 c4a2fee1 Guido Trotter
338 c4a2fee1 Guido Trotter
    # TODO: check if we're really been called with the instance locks held
339 c4a2fee1 Guido Trotter
340 c4a2fee1 Guido Trotter
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
341 c4a2fee1 Guido Trotter
    # future we might want to have different behaviors depending on the value
342 c4a2fee1 Guido Trotter
    # of self.recalculate_locks[locking.LEVEL_NODE]
343 c4a2fee1 Guido Trotter
    wanted_nodes = []
344 6683bba2 Guido Trotter
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
345 c4a2fee1 Guido Trotter
      instance = self.context.cfg.GetInstanceInfo(instance_name)
346 c4a2fee1 Guido Trotter
      wanted_nodes.append(instance.primary_node)
347 a82ce292 Guido Trotter
      if not primary_only:
348 a82ce292 Guido Trotter
        wanted_nodes.extend(instance.secondary_nodes)
349 9513b6ab Guido Trotter
350 9513b6ab Guido Trotter
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
351 9513b6ab Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
352 9513b6ab Guido Trotter
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
353 9513b6ab Guido Trotter
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
354 c4a2fee1 Guido Trotter
355 c4a2fee1 Guido Trotter
    del self.recalculate_locks[locking.LEVEL_NODE]
356 c4a2fee1 Guido Trotter
357 a8083063 Iustin Pop
358 fe267188 Iustin Pop
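# Editor's illustrative sketch (not part of the original module): a minimal
# concurrent LU showing how ExpandNames, DeclareLocks and the locking helpers
# above fit together. The LU is hypothetical, assumes the usual
# "instance_name" opcode field and performs no real work.
class _LUExampleConcurrentSketch(LogicalUnit):
  """Hypothetical LU illustrating the locking API only.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = ["instance_name"]
  REQ_BGL = False  # don't hold the Big Ganeti Lock exclusively

  def ExpandNames(self):
    # Canonicalize self.op.instance_name and declare the instance-level lock.
    self._ExpandAndLockInstance()
    # Node locks can only be computed once the instance lock is held, so
    # declare a placeholder here and ask for recalculation in DeclareLocks.
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # The node locks are only read here, so they can be shared.
    self.share_locks[locking.LEVEL_NODE] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

  def Exec(self, feedback_fn):
    feedback_fn("Would operate on instance %s" % self.instance.name)
    return self.instance.name
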
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"

378 9a6800e1 Michael Hanselmann
  """Tasklet base class.
379 9a6800e1 Michael Hanselmann

380 9a6800e1 Michael Hanselmann
  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
381 9a6800e1 Michael Hanselmann
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
382 9a6800e1 Michael Hanselmann
  tasklets know nothing about locks.
383 9a6800e1 Michael Hanselmann

384 9a6800e1 Michael Hanselmann
  Subclasses must follow these rules:
385 9a6800e1 Michael Hanselmann
    - Implement CheckPrereq
386 9a6800e1 Michael Hanselmann
    - Implement Exec
387 9a6800e1 Michael Hanselmann

388 9a6800e1 Michael Hanselmann
  """
389 464243a7 Michael Hanselmann
  def __init__(self, lu):
390 464243a7 Michael Hanselmann
    self.lu = lu
391 464243a7 Michael Hanselmann
392 464243a7 Michael Hanselmann
    # Shortcuts
393 464243a7 Michael Hanselmann
    self.cfg = lu.cfg
394 464243a7 Michael Hanselmann
    self.rpc = lu.rpc
395 464243a7 Michael Hanselmann
396 9a6800e1 Michael Hanselmann
  def CheckPrereq(self):
397 9a6800e1 Michael Hanselmann
    """Check prerequisites for this tasklets.
398 9a6800e1 Michael Hanselmann

399 9a6800e1 Michael Hanselmann
    This method should check whether the prerequisites for the execution of
400 9a6800e1 Michael Hanselmann
    this tasklet are fulfilled. It can do internode communication, but it
401 9a6800e1 Michael Hanselmann
    should be idempotent - no cluster or system changes are allowed.
402 9a6800e1 Michael Hanselmann

403 9a6800e1 Michael Hanselmann
    The method should raise errors.OpPrereqError in case something is not
404 9a6800e1 Michael Hanselmann
    fulfilled. Its return value is ignored.
405 9a6800e1 Michael Hanselmann

406 9a6800e1 Michael Hanselmann
    This method should also update all parameters to their canonical form if it
407 9a6800e1 Michael Hanselmann
    hasn't been done before.
408 9a6800e1 Michael Hanselmann

409 9a6800e1 Michael Hanselmann
    """
410 9a6800e1 Michael Hanselmann
    raise NotImplementedError
411 9a6800e1 Michael Hanselmann
412 9a6800e1 Michael Hanselmann
  def Exec(self, feedback_fn):
413 9a6800e1 Michael Hanselmann
    """Execute the tasklet.
414 9a6800e1 Michael Hanselmann

415 9a6800e1 Michael Hanselmann
    This method should implement the actual work. It should raise
416 9a6800e1 Michael Hanselmann
    errors.OpExecError for failures that are somewhat dealt with in code, or
417 9a6800e1 Michael Hanselmann
    expected.
418 9a6800e1 Michael Hanselmann

419 9a6800e1 Michael Hanselmann
    """
420 9a6800e1 Michael Hanselmann
    raise NotImplementedError
421 9a6800e1 Michael Hanselmann
422 9a6800e1 Michael Hanselmann
423 dcb93971 Michael Hanselmann
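# Editor's illustrative sketch (not part of the original module): a minimal
# tasklet and, in the trailing comment, the way an owning LU would wire it up
# via self.tasklets. The "instances" opcode field mentioned there is
# hypothetical.
class _TaskletExampleSketch(Tasklet):
  """Hypothetical tasklet that only reports the instance it was given.

  """
  def __init__(self, lu, instance_name):
    Tasklet.__init__(self, lu)
    self.instance_name = instance_name
    self.instance = None

  def CheckPrereq(self):
    # Locking (and name expansion) already happened in the owning LU; here we
    # only look up the configuration data of the locked instance.
    self.instance = self.cfg.GetInstanceInfo(self.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.instance_name

  def Exec(self, feedback_fn):
    feedback_fn("Tasklet ran for instance %s" % self.instance.name)

# From an LU's ExpandNames, tasklets replace the LU-level CheckPrereq/Exec:
#
#   self.tasklets = [_TaskletExampleSketch(self, name)
#                    for name in self.op.instances]
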
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is of a wrong type
  @raise errors.ProgrammerError: if the nodes parameter is empty

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
                               errors.ECODE_INVAL)

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'",
                               errors.ECODE_INVAL)

  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted

def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)

def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)), errors.ECODE_INVAL)
  setattr(op, name, val)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_INVAL)

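# Editor's illustrative sketch (not part of the original module): how an LU's
# CheckPrereq typically combines the node checks above with name expansion.
# The function is hypothetical and only strings existing helpers together.
def _ExampleCheckNodeUsableSketch(lu, node_name):
  """Expand a node name and refuse offline or drained nodes (sketch).

  """
  node = _ExpandNodeName(lu.cfg, node_name)  # raises OpPrereqError if unknown
  _CheckNodeOnline(lu, node)                 # raises OpPrereqError if offline
  _CheckNodeNotDrained(lu, node)             # raises OpPrereqError if drained
  return node
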
def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node does not support the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _RequireFileStorage():
  """Checks that file storage is enabled.

  @raise errors.OpPrereqError: when file storage is disabled

  """
  if not constants.ENABLE_FILE_STORAGE:
    raise errors.OpPrereqError("File storage disabled at configure time",
                               errors.ECODE_INVAL)

def _CheckDiskTemplate(template):
  """Ensure a given disk template is valid.

  """
  if template not in constants.DISK_TEMPLATES:
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
  if template == constants.DT_FILE:
    _RequireFileStorage()


def _CheckStorageType(storage_type):
  """Ensure a given storage type is valid.

  """
  if storage_type not in constants.VALID_STORAGE_TYPES:
    raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                               errors.ECODE_INVAL)
  if storage_type == constants.ST_FILE:
    _RequireFileStorage()


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instances."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")

def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance-related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env

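# Editor's illustrative sketch (not part of the original module): what the
# environment built by _BuildInstanceHookEnv looks like for a made-up
# single-NIC, single-disk instance. All literal values are invented.
def _ExampleInstanceHookEnvSketch():
  """Build a sample hook environment for a fictitious instance (sketch).

  """
  nics = [("198.51.100.10", "aa:00:00:11:22:33",
           constants.NIC_MODE_BRIDGED, "xen-br0")]
  env = _BuildInstanceHookEnv("inst1.example.com", "node1.example.com",
                              ["node2.example.com"], "debootstrap", True,
                              128, 1, nics, constants.DT_DRBD8,
                              [(1024, "rw")], {}, {}, "xen-pvm")
  # Among others, env now contains:
  #   OP_TARGET/INSTANCE_NAME=inst1.example.com
  #   INSTANCE_PRIMARY=node1.example.com
  #   INSTANCE_SECONDARIES=node2.example.com
  #   INSTANCE_STATUS=up, INSTANCE_NIC_COUNT=1, INSTANCE_DISK_COUNT=1
  #   INSTANCE_NIC0_MODE=bridged, INSTANCE_NIC0_BRIDGE=xen-br0
  #   INSTANCE_DISK0_SIZE=1024, INSTANCE_DISK0_MODE=rw
  # (the hooks runner later prefixes each key with "GANETI_")
  return env
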
def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance-related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142

def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max by one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should

def _CheckNicsBridgesExist(lu, target_nics, target_node,
                               profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the OS variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  if not os_obj.supported_variants:
    return
  try:
    variant = name.split("+", 1)[1]
  except IndexError:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)

def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []

def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _FormatTimestamp(secs):
  """Formats a Unix timestamp with the local timezone.

  """
  return time.strftime("%F %T %Z", time.gmtime(secs))

class LUPostInitCluster(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def CheckPrereq(self):
    """No prerequisites to check.

    """
    return True

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True

class LUDestroyCluster(LogicalUnit):
958 a8083063 Iustin Pop
  """Logical unit for destroying the cluster.
959 a8083063 Iustin Pop

960 a8083063 Iustin Pop
  """
961 b2c750a4 Luca Bigliardi
  HPATH = "cluster-destroy"
962 b2c750a4 Luca Bigliardi
  HTYPE = constants.HTYPE_CLUSTER
963 a8083063 Iustin Pop
  _OP_REQP = []
964 a8083063 Iustin Pop
965 b2c750a4 Luca Bigliardi
  def BuildHooksEnv(self):
966 b2c750a4 Luca Bigliardi
    """Build hooks env.
967 b2c750a4 Luca Bigliardi

968 b2c750a4 Luca Bigliardi
    """
969 b2c750a4 Luca Bigliardi
    env = {"OP_TARGET": self.cfg.GetClusterName()}
970 b2c750a4 Luca Bigliardi
    return env, [], []
971 b2c750a4 Luca Bigliardi
972 a8083063 Iustin Pop
  def CheckPrereq(self):
973 a8083063 Iustin Pop
    """Check prerequisites.
974 a8083063 Iustin Pop

975 a8083063 Iustin Pop
    This checks whether the cluster is empty.
976 a8083063 Iustin Pop

977 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
978 a8083063 Iustin Pop

979 a8083063 Iustin Pop
    """
980 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
981 a8083063 Iustin Pop
982 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
983 db915bd1 Michael Hanselmann
    if len(nodelist) != 1 or nodelist[0] != master:
984 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d node(s) in"
985 5c983ee5 Iustin Pop
                                 " this cluster." % (len(nodelist) - 1),
986 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
987 db915bd1 Michael Hanselmann
    instancelist = self.cfg.GetInstanceList()
988 db915bd1 Michael Hanselmann
    if instancelist:
989 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d instance(s) in"
990 5c983ee5 Iustin Pop
                                 " this cluster." % len(instancelist),
991 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
992 a8083063 Iustin Pop
993 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
994 a8083063 Iustin Pop
    """Destroys the cluster.
995 a8083063 Iustin Pop

996 a8083063 Iustin Pop
    """
997 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
998 b989b9d9 Ken Wehr
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
999 3141ad3b Luca Bigliardi
1000 3141ad3b Luca Bigliardi
    # Run post hooks on master node before it's removed
1001 3141ad3b Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
1002 3141ad3b Luca Bigliardi
    try:
1003 3141ad3b Luca Bigliardi
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
1004 3141ad3b Luca Bigliardi
    except:
1005 7260cfbe Iustin Pop
      # pylint: disable-msg=W0702
1006 3141ad3b Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % master)
1007 3141ad3b Luca Bigliardi
1008 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
1009 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
1010 b989b9d9 Ken Wehr
1011 b989b9d9 Ken Wehr
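    # keep backup copies of the SSH keys of the user Ganeti runs as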
    if modify_ssh_setup:
1012 b989b9d9 Ken Wehr
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1013 b989b9d9 Ken Wehr
      utils.CreateBackup(priv_key)
1014 b989b9d9 Ken Wehr
      utils.CreateBackup(pub_key)
1015 b989b9d9 Ken Wehr
1016 140aa4a8 Iustin Pop
    return master
1017 a8083063 Iustin Pop
1018 a8083063 Iustin Pop
1019 b98bf262 Michael Hanselmann
def _VerifyCertificateInner(filename, expired, not_before, not_after, now,
1020 b98bf262 Michael Hanselmann
                            warn_days=constants.SSL_CERT_EXPIRATION_WARN,
1021 b98bf262 Michael Hanselmann
                            error_days=constants.SSL_CERT_EXPIRATION_ERROR):
1022 b98bf262 Michael Hanselmann
  """Verifies certificate details for LUVerifyCluster.
1023 b98bf262 Michael Hanselmann

1024 b98bf262 Michael Hanselmann
  """
1025 b98bf262 Michael Hanselmann
  if expired:
1026 b98bf262 Michael Hanselmann
    msg = "Certificate %s is expired" % filename
1027 b98bf262 Michael Hanselmann
1028 b98bf262 Michael Hanselmann
    if not_before is not None and not_after is not None:
1029 b98bf262 Michael Hanselmann
      msg += (" (valid from %s to %s)" %
1030 b98bf262 Michael Hanselmann
              (_FormatTimestamp(not_before),
1031 b98bf262 Michael Hanselmann
               _FormatTimestamp(not_after)))
1032 b98bf262 Michael Hanselmann
    elif not_before is not None:
1033 b98bf262 Michael Hanselmann
      msg += " (valid from %s)" % _FormatTimestamp(not_before)
1034 b98bf262 Michael Hanselmann
    elif not_after is not None:
1035 b98bf262 Michael Hanselmann
      msg += " (valid until %s)" % _FormatTimestamp(not_after)
1036 b98bf262 Michael Hanselmann
1037 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_ERROR, msg)
1038 b98bf262 Michael Hanselmann
1039 b98bf262 Michael Hanselmann
  elif not_before is not None and not_before > now:
1040 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_WARNING,
1041 b98bf262 Michael Hanselmann
            "Certificate %s not yet valid (valid from %s)" %
1042 b98bf262 Michael Hanselmann
            (filename, _FormatTimestamp(not_before)))
1043 b98bf262 Michael Hanselmann
1044 b98bf262 Michael Hanselmann
  elif not_after is not None:
1045 b98bf262 Michael Hanselmann
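    # certificate is currently valid; warn or error depending on how close
    # to expiry it is (warn_days/error_days thresholds)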
    remaining_days = int((not_after - now) / (24 * 3600))
1046 b98bf262 Michael Hanselmann
1047 b98bf262 Michael Hanselmann
    msg = ("Certificate %s expires in %d days" % (filename, remaining_days))
1048 b98bf262 Michael Hanselmann
1049 b98bf262 Michael Hanselmann
    if remaining_days <= error_days:
1050 b98bf262 Michael Hanselmann
      return (LUVerifyCluster.ETYPE_ERROR, msg)
1051 b98bf262 Michael Hanselmann
1052 b98bf262 Michael Hanselmann
    if remaining_days <= warn_days:
1053 b98bf262 Michael Hanselmann
      return (LUVerifyCluster.ETYPE_WARNING, msg)
1054 b98bf262 Michael Hanselmann
1055 b98bf262 Michael Hanselmann
  return (None, None)
1056 b98bf262 Michael Hanselmann
1057 b98bf262 Michael Hanselmann
1058 b98bf262 Michael Hanselmann
def _VerifyCertificate(filename):
1059 b98bf262 Michael Hanselmann
  """Verifies a certificate for LUVerifyCluster.
1060 b98bf262 Michael Hanselmann

1061 b98bf262 Michael Hanselmann
  @type filename: string
1062 b98bf262 Michael Hanselmann
  @param filename: Path to PEM file
1063 b98bf262 Michael Hanselmann

1064 b98bf262 Michael Hanselmann
  """
1065 b98bf262 Michael Hanselmann
  try:
1066 b98bf262 Michael Hanselmann
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1067 b98bf262 Michael Hanselmann
                                           utils.ReadFile(filename))
1068 b98bf262 Michael Hanselmann
  except Exception, err: # pylint: disable-msg=W0703
1069 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_ERROR,
1070 b98bf262 Michael Hanselmann
            "Failed to load X509 certificate %s: %s" % (filename, err))
1071 b98bf262 Michael Hanselmann
1072 b98bf262 Michael Hanselmann
  # Depending on the pyOpenSSL version, this can just return (None, None)
1073 b98bf262 Michael Hanselmann
  (not_before, not_after) = utils.GetX509CertValidity(cert)
1074 b98bf262 Michael Hanselmann
1075 b98bf262 Michael Hanselmann
  return _VerifyCertificateInner(filename, cert.has_expired(),
1076 b98bf262 Michael Hanselmann
                                 not_before, not_after, time.time())
1077 b98bf262 Michael Hanselmann
1078 b98bf262 Michael Hanselmann
1079 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
1080 a8083063 Iustin Pop
  """Verifies the cluster status.
1081 a8083063 Iustin Pop

1082 a8083063 Iustin Pop
  """
1083 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
1084 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
1085 a0c9776a Iustin Pop
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
1086 d4b9d97f Guido Trotter
  REQ_BGL = False
1087 d4b9d97f Guido Trotter
1088 7c874ee1 Iustin Pop
  TCLUSTER = "cluster"
1089 7c874ee1 Iustin Pop
  TNODE = "node"
1090 7c874ee1 Iustin Pop
  TINSTANCE = "instance"
1091 7c874ee1 Iustin Pop
1092 7c874ee1 Iustin Pop
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1093 b98bf262 Michael Hanselmann
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1094 7c874ee1 Iustin Pop
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1095 7c874ee1 Iustin Pop
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1096 7c874ee1 Iustin Pop
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1097 7c874ee1 Iustin Pop
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1099 7c874ee1 Iustin Pop
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1100 7c874ee1 Iustin Pop
  ENODEDRBD = (TNODE, "ENODEDRBD")
1101 7c874ee1 Iustin Pop
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1102 7c874ee1 Iustin Pop
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1103 7c874ee1 Iustin Pop
  ENODEHV = (TNODE, "ENODEHV")
1104 7c874ee1 Iustin Pop
  ENODELVM = (TNODE, "ENODELVM")
1105 7c874ee1 Iustin Pop
  ENODEN1 = (TNODE, "ENODEN1")
1106 7c874ee1 Iustin Pop
  ENODENET = (TNODE, "ENODENET")
1107 7c874ee1 Iustin Pop
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1108 7c874ee1 Iustin Pop
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1109 7c874ee1 Iustin Pop
  ENODERPC = (TNODE, "ENODERPC")
1110 7c874ee1 Iustin Pop
  ENODESSH = (TNODE, "ENODESSH")
1111 7c874ee1 Iustin Pop
  ENODEVERSION = (TNODE, "ENODEVERSION")
1112 7c0aa8e9 Iustin Pop
  ENODESETUP = (TNODE, "ENODESETUP")
1113 313b2dd4 Michael Hanselmann
  ENODETIME = (TNODE, "ENODETIME")
1114 7c874ee1 Iustin Pop
1115 a0c9776a Iustin Pop
  ETYPE_FIELD = "code"
1116 a0c9776a Iustin Pop
  ETYPE_ERROR = "ERROR"
1117 a0c9776a Iustin Pop
  ETYPE_WARNING = "WARNING"
1118 a0c9776a Iustin Pop
1119 02c521e4 Iustin Pop
  class NodeImage(object):
1120 02c521e4 Iustin Pop
    """A class representing the logical and physical status of a node.
1121 02c521e4 Iustin Pop

1122 02c521e4 Iustin Pop
    @ivar volumes: a structure as returned from
1123 3a488770 Iustin Pop
        L{ganeti.backend.GetVolumeList} (runtime)
1124 02c521e4 Iustin Pop
    @ivar instances: a list of running instances (runtime)
1125 02c521e4 Iustin Pop
    @ivar pinst: list of configured primary instances (config)
1126 02c521e4 Iustin Pop
    @ivar sinst: list of configured secondary instances (config)
1127 02c521e4 Iustin Pop
    @ivar sbp: dictionary of {primary-node: list of instances} for all
1128 02c521e4 Iustin Pop
        instances for which this node is secondary (config)
1129 02c521e4 Iustin Pop
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1130 02c521e4 Iustin Pop
    @ivar dfree: free disk, as reported by the node (runtime)
1131 02c521e4 Iustin Pop
    @ivar offline: the offline status (config)
1132 02c521e4 Iustin Pop
    @type rpc_fail: boolean
1133 02c521e4 Iustin Pop
    @ivar rpc_fail: whether the overall RPC verify call failed (as opposed
1134 02c521e4 Iustin Pop
        to individual keys returning incorrect data) (runtime)
1135 02c521e4 Iustin Pop
    @type lvm_fail: boolean
1136 02c521e4 Iustin Pop
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1137 02c521e4 Iustin Pop
    @type hyp_fail: boolean
1138 02c521e4 Iustin Pop
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1139 02c521e4 Iustin Pop
    @type ghost: boolean
1140 02c521e4 Iustin Pop
    @ivar ghost: whether this is an unknown (ghost) node (config)
1141 02c521e4 Iustin Pop

1142 02c521e4 Iustin Pop
    """
1143 02c521e4 Iustin Pop
    def __init__(self, offline=False):
1144 02c521e4 Iustin Pop
      self.volumes = {}
1145 02c521e4 Iustin Pop
      self.instances = []
1146 02c521e4 Iustin Pop
      self.pinst = []
1147 02c521e4 Iustin Pop
      self.sinst = []
1148 02c521e4 Iustin Pop
      self.sbp = {}
1149 02c521e4 Iustin Pop
      self.mfree = 0
1150 02c521e4 Iustin Pop
      self.dfree = 0
1151 02c521e4 Iustin Pop
      self.offline = offline
1152 02c521e4 Iustin Pop
      self.rpc_fail = False
1153 02c521e4 Iustin Pop
      self.lvm_fail = False
1154 02c521e4 Iustin Pop
      self.hyp_fail = False
1155 02c521e4 Iustin Pop
      self.ghost = False
1156 02c521e4 Iustin Pop
1157 d4b9d97f Guido Trotter
  def ExpandNames(self):
1158 d4b9d97f Guido Trotter
    self.needed_locks = {
1159 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1160 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1161 d4b9d97f Guido Trotter
    }
1162 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1163 a8083063 Iustin Pop
1164 7c874ee1 Iustin Pop
  def _Error(self, ecode, item, msg, *args, **kwargs):
1165 7c874ee1 Iustin Pop
    """Format an error message.
1166 7c874ee1 Iustin Pop

1167 7c874ee1 Iustin Pop
    Based on the opcode's error_codes parameter, either format a
1168 7c874ee1 Iustin Pop
    parseable error code, or a simpler error string.
1169 7c874ee1 Iustin Pop

1170 7c874ee1 Iustin Pop
    This must be called only from Exec and functions called from Exec.
1171 7c874ee1 Iustin Pop

1172 7c874ee1 Iustin Pop
    """
1173 a0c9776a Iustin Pop
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1174 7c874ee1 Iustin Pop
    itype, etxt = ecode
1175 7c874ee1 Iustin Pop
    # first complete the msg
1176 7c874ee1 Iustin Pop
    if args:
1177 7c874ee1 Iustin Pop
      msg = msg % args
1178 7c874ee1 Iustin Pop
    # then format the whole message
1179 7c874ee1 Iustin Pop
    if self.op.error_codes:
1180 7c874ee1 Iustin Pop
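      # machine-parseable form, e.g.
      # "ERROR:ENODELVM:node:node1.example.com:unable to check volume groups"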
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1181 7c874ee1 Iustin Pop
    else:
1182 7c874ee1 Iustin Pop
      if item:
1183 7c874ee1 Iustin Pop
        item = " " + item
1184 7c874ee1 Iustin Pop
      else:
1185 7c874ee1 Iustin Pop
        item = ""
1186 7c874ee1 Iustin Pop
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1187 7c874ee1 Iustin Pop
    # and finally report it via the feedback_fn
1188 7c874ee1 Iustin Pop
    self._feedback_fn("  - %s" % msg)
1189 7c874ee1 Iustin Pop
1190 a0c9776a Iustin Pop
  def _ErrorIf(self, cond, *args, **kwargs):
1191 a0c9776a Iustin Pop
    """Log an error message if the passed condition is True.
1192 a0c9776a Iustin Pop

1193 a0c9776a Iustin Pop
    """
1194 a0c9776a Iustin Pop
    cond = bool(cond) or self.op.debug_simulate_errors
1195 a0c9776a Iustin Pop
    if cond:
1196 a0c9776a Iustin Pop
      self._Error(*args, **kwargs)
1197 a0c9776a Iustin Pop
    # do not mark the operation as failed for WARN cases only
1198 a0c9776a Iustin Pop
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1199 a0c9776a Iustin Pop
      self.bad = self.bad or cond
1200 a0c9776a Iustin Pop
1201 02c521e4 Iustin Pop
  def _VerifyNode(self, ninfo, nresult):
1202 a8083063 Iustin Pop
    """Run multiple tests against a node.
1203 a8083063 Iustin Pop

1204 112f18a5 Iustin Pop
    Test list:
1205 e4376078 Iustin Pop

1206 a8083063 Iustin Pop
      - compares ganeti version
1207 5bbd3f7f Michael Hanselmann
      - checks vg existence and size > 20G
1208 a8083063 Iustin Pop
      - checks config file checksum
1209 a8083063 Iustin Pop
      - checks ssh to other nodes
1210 a8083063 Iustin Pop

1211 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1212 02c521e4 Iustin Pop
    @param ninfo: the node to check
1213 02c521e4 Iustin Pop
    @param nresult: the results from the node
1214 02c521e4 Iustin Pop
    @rtype: boolean
1215 02c521e4 Iustin Pop
    @return: whether overall this call was successful (and we can expect
1216 02c521e4 Iustin Pop
         reasonable values in the response)
1217 098c0958 Michael Hanselmann

1218 a8083063 Iustin Pop
    """
1219 02c521e4 Iustin Pop
    node = ninfo.name
1220 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1221 25361b9a Iustin Pop
1222 02c521e4 Iustin Pop
    # main result, nresult should be a non-empty dict
1223 02c521e4 Iustin Pop
    test = not nresult or not isinstance(nresult, dict)
1224 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1225 7c874ee1 Iustin Pop
                  "unable to verify node: no data returned")
1226 a0c9776a Iustin Pop
    if test:
1227 02c521e4 Iustin Pop
      return False
1228 25361b9a Iustin Pop
1229 a8083063 Iustin Pop
    # compares ganeti version
1230 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
1231 02c521e4 Iustin Pop
    remote_version = nresult.get("version", None)
1232 a0c9776a Iustin Pop
    test = not (remote_version and
1233 a0c9776a Iustin Pop
                isinstance(remote_version, (list, tuple)) and
1234 a0c9776a Iustin Pop
                len(remote_version) == 2)
1235 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1236 a0c9776a Iustin Pop
             "connection to node returned invalid data")
1237 a0c9776a Iustin Pop
    if test:
1238 02c521e4 Iustin Pop
      return False
1239 a0c9776a Iustin Pop
1240 a0c9776a Iustin Pop
    test = local_version != remote_version[0]
1241 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEVERSION, node,
1242 a0c9776a Iustin Pop
             "incompatible protocol versions: master %s,"
1243 a0c9776a Iustin Pop
             " node %s", local_version, remote_version[0])
1244 a0c9776a Iustin Pop
    if test:
1245 02c521e4 Iustin Pop
      return False
1246 a8083063 Iustin Pop
1247 e9ce0a64 Iustin Pop
    # node seems compatible, we can actually try to look into its results
1248 a8083063 Iustin Pop
1249 e9ce0a64 Iustin Pop
    # full package version
1250 a0c9776a Iustin Pop
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1251 a0c9776a Iustin Pop
                  self.ENODEVERSION, node,
1252 7c874ee1 Iustin Pop
                  "software version mismatch: master %s, node %s",
1253 7c874ee1 Iustin Pop
                  constants.RELEASE_VERSION, remote_version[1],
1254 a0c9776a Iustin Pop
                  code=self.ETYPE_WARNING)
1255 e9ce0a64 Iustin Pop
1256 02c521e4 Iustin Pop
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1257 02c521e4 Iustin Pop
    if isinstance(hyp_result, dict):
1258 02c521e4 Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
1259 02c521e4 Iustin Pop
        test = hv_result is not None
1260 02c521e4 Iustin Pop
        _ErrorIf(test, self.ENODEHV, node,
1261 02c521e4 Iustin Pop
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1262 a8083063 Iustin Pop
1263 a8083063 Iustin Pop
1264 02c521e4 Iustin Pop
    test = nresult.get(constants.NV_NODESETUP,
1265 02c521e4 Iustin Pop
                           ["Missing NODESETUP results"])
1266 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1267 02c521e4 Iustin Pop
             "; ".join(test))
1268 02c521e4 Iustin Pop
1269 02c521e4 Iustin Pop
    return True
1270 02c521e4 Iustin Pop
1271 02c521e4 Iustin Pop
  def _VerifyNodeTime(self, ninfo, nresult,
1272 02c521e4 Iustin Pop
                      nvinfo_starttime, nvinfo_endtime):
1273 02c521e4 Iustin Pop
    """Check the node time.
1274 02c521e4 Iustin Pop

1275 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1276 02c521e4 Iustin Pop
    @param ninfo: the node to check
1277 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1278 02c521e4 Iustin Pop
    @param nvinfo_starttime: the start time of the RPC call
1279 02c521e4 Iustin Pop
    @param nvinfo_endtime: the end time of the RPC call
1280 02c521e4 Iustin Pop

1281 02c521e4 Iustin Pop
    """
1282 02c521e4 Iustin Pop
    node = ninfo.name
1283 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1284 02c521e4 Iustin Pop
1285 02c521e4 Iustin Pop
    ntime = nresult.get(constants.NV_TIME, None)
1286 02c521e4 Iustin Pop
    try:
1287 02c521e4 Iustin Pop
      ntime_merged = utils.MergeTime(ntime)
1288 02c521e4 Iustin Pop
    except (ValueError, TypeError):
1289 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1290 02c521e4 Iustin Pop
      return
1291 02c521e4 Iustin Pop
1292 02c521e4 Iustin Pop
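    # the reported node time must fall inside the RPC call window, extended
    # by the allowed clock skew on each side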
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1293 02c521e4 Iustin Pop
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1294 02c521e4 Iustin Pop
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1295 02c521e4 Iustin Pop
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1296 02c521e4 Iustin Pop
    else:
1297 02c521e4 Iustin Pop
      ntime_diff = None
1298 02c521e4 Iustin Pop
1299 02c521e4 Iustin Pop
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1300 02c521e4 Iustin Pop
             "Node time diverges by at least %s from master node time",
1301 02c521e4 Iustin Pop
             ntime_diff)
1302 02c521e4 Iustin Pop
1303 02c521e4 Iustin Pop
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1304 02c521e4 Iustin Pop
    """Check the node LVM results.
1305 02c521e4 Iustin Pop

1306 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1307 02c521e4 Iustin Pop
    @param ninfo: the node to check
1308 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1309 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1310 02c521e4 Iustin Pop

1311 02c521e4 Iustin Pop
    """
1312 02c521e4 Iustin Pop
    if vg_name is None:
1313 02c521e4 Iustin Pop
      return
1314 02c521e4 Iustin Pop
1315 02c521e4 Iustin Pop
    node = ninfo.name
1316 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1317 02c521e4 Iustin Pop
1318 02c521e4 Iustin Pop
    # checks vg existence and size > 20G
1319 02c521e4 Iustin Pop
    vglist = nresult.get(constants.NV_VGLIST, None)
1320 02c521e4 Iustin Pop
    test = not vglist
1321 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1322 02c521e4 Iustin Pop
    if not test:
1323 02c521e4 Iustin Pop
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1324 02c521e4 Iustin Pop
                                            constants.MIN_VG_SIZE)
1325 02c521e4 Iustin Pop
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1326 02c521e4 Iustin Pop
1327 02c521e4 Iustin Pop
    # check pv names
1328 02c521e4 Iustin Pop
    pvlist = nresult.get(constants.NV_PVLIST, None)
1329 02c521e4 Iustin Pop
    test = pvlist is None
1330 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1331 a0c9776a Iustin Pop
    if not test:
1332 02c521e4 Iustin Pop
      # check that ':' is not present in PV names, since it's a
1333 02c521e4 Iustin Pop
      # special character for lvcreate (denotes the range of PEs to
1334 02c521e4 Iustin Pop
      # use on the PV)
1335 02c521e4 Iustin Pop
      for _, pvname, owner_vg in pvlist:
1336 02c521e4 Iustin Pop
        test = ":" in pvname
1337 02c521e4 Iustin Pop
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1338 02c521e4 Iustin Pop
                 " '%s' of VG '%s'", pvname, owner_vg)
1339 02c521e4 Iustin Pop
1340 02c521e4 Iustin Pop
  def _VerifyNodeNetwork(self, ninfo, nresult):
1341 02c521e4 Iustin Pop
    """Check the node network connectivity.
1342 02c521e4 Iustin Pop

1343 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1344 02c521e4 Iustin Pop
    @param ninfo: the node to check
1345 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1346 02c521e4 Iustin Pop

1347 02c521e4 Iustin Pop
    """
1348 02c521e4 Iustin Pop
    node = ninfo.name
1349 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1350 02c521e4 Iustin Pop
1351 02c521e4 Iustin Pop
    test = constants.NV_NODELIST not in nresult
1352 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODESSH, node,
1353 a0c9776a Iustin Pop
             "node hasn't returned node ssh connectivity data")
1354 a0c9776a Iustin Pop
    if not test:
1355 02c521e4 Iustin Pop
      if nresult[constants.NV_NODELIST]:
1356 02c521e4 Iustin Pop
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1357 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODESSH, node,
1358 a0c9776a Iustin Pop
                   "ssh communication with node '%s': %s", a_node, a_msg)
1359 25361b9a Iustin Pop
1360 02c521e4 Iustin Pop
    test = constants.NV_NODENETTEST not in nresult
1361 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODENET, node,
1362 a0c9776a Iustin Pop
             "node hasn't returned node tcp connectivity data")
1363 a0c9776a Iustin Pop
    if not test:
1364 02c521e4 Iustin Pop
      if nresult[constants.NV_NODENETTEST]:
1365 02c521e4 Iustin Pop
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1366 7c874ee1 Iustin Pop
        for anode in nlist:
1367 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODENET, node,
1368 a0c9776a Iustin Pop
                   "tcp communication with node '%s': %s",
1369 02c521e4 Iustin Pop
                   anode, nresult[constants.NV_NODENETTEST][anode])
1370 a8083063 Iustin Pop
1371 02c521e4 Iustin Pop
  def _VerifyInstance(self, instance, instanceconfig, node_image):
1372 a8083063 Iustin Pop
    """Verify an instance.
1373 a8083063 Iustin Pop

1374 a8083063 Iustin Pop
    This function checks to see if the required block devices are
1375 a8083063 Iustin Pop
    available on the instance's node.
1376 a8083063 Iustin Pop

1377 a8083063 Iustin Pop
    """
1378 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1379 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
1380 a8083063 Iustin Pop
1381 a8083063 Iustin Pop
    node_vol_should = {}
1382 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
1383 a8083063 Iustin Pop
1384 a8083063 Iustin Pop
    for node in node_vol_should:
1385 02c521e4 Iustin Pop
      n_img = node_image[node]
1386 02c521e4 Iustin Pop
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1387 02c521e4 Iustin Pop
        # ignore missing volumes on offline or broken nodes
1388 0a66c968 Iustin Pop
        continue
1389 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
1390 02c521e4 Iustin Pop
        test = volume not in n_img.volumes
1391 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1392 a0c9776a Iustin Pop
                 "volume %s missing on node %s", volume, node)
1393 a8083063 Iustin Pop
1394 0d68c45d Iustin Pop
    if instanceconfig.admin_up:
1395 02c521e4 Iustin Pop
      pri_img = node_image[node_current]
1396 02c521e4 Iustin Pop
      test = instance not in pri_img.instances and not pri_img.offline
1397 a0c9776a Iustin Pop
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1398 a0c9776a Iustin Pop
               "instance not running on its primary node %s",
1399 a0c9776a Iustin Pop
               node_current)
1400 a8083063 Iustin Pop
1401 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1402 a8083063 Iustin Pop
      if node != node_current:
1403 02c521e4 Iustin Pop
        test = instance in n_img.instances
1404 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1405 a0c9776a Iustin Pop
                 "instance should not run on node %s", node)
1406 a8083063 Iustin Pop
1407 02c521e4 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_image):
1408 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
1409 a8083063 Iustin Pop

1410 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
1411 a8083063 Iustin Pop
    reported as unknown.
1412 a8083063 Iustin Pop

1413 a8083063 Iustin Pop
    """
1414 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1415 02c521e4 Iustin Pop
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1416 02c521e4 Iustin Pop
        # skip non-healthy nodes
1417 02c521e4 Iustin Pop
        continue
1418 02c521e4 Iustin Pop
      for volume in n_img.volumes:
1419 a0c9776a Iustin Pop
        test = (node not in node_vol_should or
1420 a0c9776a Iustin Pop
                volume not in node_vol_should[node])
1421 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1422 7c874ee1 Iustin Pop
                      "volume %s is unknown", volume)
1423 a8083063 Iustin Pop
1424 02c521e4 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_image):
1425 a8083063 Iustin Pop
    """Verify the list of running instances.
1426 a8083063 Iustin Pop

1427 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
1428 a8083063 Iustin Pop

1429 a8083063 Iustin Pop
    """
1430 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1431 02c521e4 Iustin Pop
      for o_inst in n_img.instances:
1432 a0c9776a Iustin Pop
        test = o_inst not in instancelist
1433 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1434 7c874ee1 Iustin Pop
                      "instance %s on node %s should not exist", o_inst, node)
1435 a8083063 Iustin Pop
1436 02c521e4 Iustin Pop
  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1437 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
1438 2b3b6ddd Guido Trotter

1439 02c521e4 Iustin Pop
    Check that if one single node dies we can still start all the
1440 02c521e4 Iustin Pop
    instances it was primary for.
1441 2b3b6ddd Guido Trotter

1442 2b3b6ddd Guido Trotter
    """
1443 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1444 02c521e4 Iustin Pop
      # This code checks that every node which is now listed as
1445 02c521e4 Iustin Pop
      # secondary has enough memory to host all instances it is
1446 02c521e4 Iustin Pop
      # supposed to host, should a single other node in the cluster fail.
1447 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
1448 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
1449 02c521e4 Iustin Pop
      # WARNING: we currently take into account down instances as well
1450 02c521e4 Iustin Pop
      # as up ones, considering that even if they're down someone
1451 02c521e4 Iustin Pop
      # might want to start them even in the event of a node failure.
1452 02c521e4 Iustin Pop
      for prinode, instances in n_img.sbp.items():
1453 2b3b6ddd Guido Trotter
        needed_mem = 0
1454 2b3b6ddd Guido Trotter
        for instance in instances:
1455 338e51e8 Iustin Pop
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1456 c0f2b229 Iustin Pop
          if bep[constants.BE_AUTO_BALANCE]:
1457 3924700f Iustin Pop
            needed_mem += bep[constants.BE_MEMORY]
1458 02c521e4 Iustin Pop
        test = n_img.mfree < needed_mem
1459 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEN1, node,
1460 7c874ee1 Iustin Pop
                      "not enough memory to accommodate instance"
1461 7c874ee1 Iustin Pop
                      " failovers should peer node %s fail", prinode)
1462 2b3b6ddd Guido Trotter
1463 02c521e4 Iustin Pop
  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
1464 02c521e4 Iustin Pop
                       master_files):
1465 02c521e4 Iustin Pop
    """Verifies and computes the node required file checksums.
1466 02c521e4 Iustin Pop

1467 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1468 02c521e4 Iustin Pop
    @param ninfo: the node to check
1469 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1470 02c521e4 Iustin Pop
    @param file_list: required list of files
1471 02c521e4 Iustin Pop
    @param local_cksum: dictionary of local files and their checksums
1472 02c521e4 Iustin Pop
    @param master_files: list of files that only masters should have
1473 02c521e4 Iustin Pop

1474 02c521e4 Iustin Pop
    """
1475 02c521e4 Iustin Pop
    node = ninfo.name
1476 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1477 02c521e4 Iustin Pop
1478 02c521e4 Iustin Pop
    remote_cksum = nresult.get(constants.NV_FILELIST, None)
1479 02c521e4 Iustin Pop
    test = not isinstance(remote_cksum, dict)
1480 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEFILECHECK, node,
1481 02c521e4 Iustin Pop
             "node hasn't returned file checksum data")
1482 02c521e4 Iustin Pop
    if test:
1483 02c521e4 Iustin Pop
      return
1484 02c521e4 Iustin Pop
1485 02c521e4 Iustin Pop
    for file_name in file_list:
1486 02c521e4 Iustin Pop
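      # master-only files need to be present only on master candidates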
      node_is_mc = ninfo.master_candidate
1487 02c521e4 Iustin Pop
      must_have = (file_name not in master_files) or node_is_mc
1488 02c521e4 Iustin Pop
      # missing
1489 02c521e4 Iustin Pop
      test1 = file_name not in remote_cksum
1490 02c521e4 Iustin Pop
      # invalid checksum
1491 02c521e4 Iustin Pop
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1492 02c521e4 Iustin Pop
      # existing and good
1493 02c521e4 Iustin Pop
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1494 02c521e4 Iustin Pop
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1495 02c521e4 Iustin Pop
               "file '%s' missing", file_name)
1496 02c521e4 Iustin Pop
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1497 02c521e4 Iustin Pop
               "file '%s' has wrong checksum", file_name)
1498 02c521e4 Iustin Pop
      # not candidate and this is not a must-have file
1499 02c521e4 Iustin Pop
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1500 02c521e4 Iustin Pop
               "file '%s' should not exist on non master"
1501 02c521e4 Iustin Pop
               " candidates (and the file is outdated)", file_name)
1502 02c521e4 Iustin Pop
      # all good, except non-master/non-must have combination
1503 02c521e4 Iustin Pop
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1504 02c521e4 Iustin Pop
               "file '%s' should not exist"
1505 02c521e4 Iustin Pop
               " on non master candidates", file_name)
1506 02c521e4 Iustin Pop
1507 02c521e4 Iustin Pop
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_map):
1508 02c521e4 Iustin Pop
    """Verifies the node DRBD status.
1509 02c521e4 Iustin Pop

1510 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1511 02c521e4 Iustin Pop
    @param ninfo: the node to check
1512 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1513 02c521e4 Iustin Pop
    @param instanceinfo: the dict of instances
1514 02c521e4 Iustin Pop
    @param drbd_map: the DRBD map as returned by
1515 02c521e4 Iustin Pop
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1516 02c521e4 Iustin Pop

1517 02c521e4 Iustin Pop
    """
1518 02c521e4 Iustin Pop
    node = ninfo.name
1519 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1520 02c521e4 Iustin Pop
1521 02c521e4 Iustin Pop
    # compute the DRBD minors
1522 02c521e4 Iustin Pop
    node_drbd = {}
1523 02c521e4 Iustin Pop
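    # drbd_map[node] maps minor -> instance name, as computed from the
    # cluster configuration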
    for minor, instance in drbd_map[node].items():
1524 02c521e4 Iustin Pop
      test = instance not in instanceinfo
1525 02c521e4 Iustin Pop
      _ErrorIf(test, self.ECLUSTERCFG, None,
1526 02c521e4 Iustin Pop
               "ghost instance '%s' in temporary DRBD map", instance)
1527 02c521e4 Iustin Pop
        # ghost instance should not be running, but otherwise we
1528 02c521e4 Iustin Pop
        # don't give double warnings (both ghost instance and
1529 02c521e4 Iustin Pop
        # unallocated minor in use)
1530 02c521e4 Iustin Pop
      if test:
1531 02c521e4 Iustin Pop
        node_drbd[minor] = (instance, False)
1532 02c521e4 Iustin Pop
      else:
1533 02c521e4 Iustin Pop
        instance = instanceinfo[instance]
1534 02c521e4 Iustin Pop
        node_drbd[minor] = (instance.name, instance.admin_up)
1535 02c521e4 Iustin Pop
1536 02c521e4 Iustin Pop
    # and now check them
1537 02c521e4 Iustin Pop
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
1538 02c521e4 Iustin Pop
    test = not isinstance(used_minors, (tuple, list))
1539 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEDRBD, node,
1540 02c521e4 Iustin Pop
             "cannot parse drbd status file: %s", str(used_minors))
1541 02c521e4 Iustin Pop
    if test:
1542 02c521e4 Iustin Pop
      # we cannot check drbd status
1543 02c521e4 Iustin Pop
      return
1544 02c521e4 Iustin Pop
1545 02c521e4 Iustin Pop
    for minor, (iname, must_exist) in node_drbd.items():
1546 02c521e4 Iustin Pop
      test = minor not in used_minors and must_exist
1547 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1548 02c521e4 Iustin Pop
               "drbd minor %d of instance %s is not active", minor, iname)
1549 02c521e4 Iustin Pop
    for minor in used_minors:
1550 02c521e4 Iustin Pop
      test = minor not in node_drbd
1551 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1552 02c521e4 Iustin Pop
               "unallocated drbd minor %d is in use", minor)
1553 02c521e4 Iustin Pop
1554 02c521e4 Iustin Pop
  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1555 02c521e4 Iustin Pop
    """Verifies and updates the node volume data.
1556 02c521e4 Iustin Pop

1557 02c521e4 Iustin Pop
    This function will update a L{NodeImage}'s internal structures
1558 02c521e4 Iustin Pop
    with data from the remote call.
1559 02c521e4 Iustin Pop

1560 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1561 02c521e4 Iustin Pop
    @param ninfo: the node to check
1562 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1563 02c521e4 Iustin Pop
    @param nimg: the node image object
1564 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1565 02c521e4 Iustin Pop

1566 02c521e4 Iustin Pop
    """
1567 02c521e4 Iustin Pop
    node = ninfo.name
1568 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1569 02c521e4 Iustin Pop
1570 02c521e4 Iustin Pop
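    # assume LVM data is broken until the checks below prove otherwise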
    nimg.lvm_fail = True
1571 02c521e4 Iustin Pop
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1572 02c521e4 Iustin Pop
    if vg_name is None:
1573 02c521e4 Iustin Pop
      pass
1574 02c521e4 Iustin Pop
    elif isinstance(lvdata, basestring):
1575 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1576 02c521e4 Iustin Pop
               utils.SafeEncode(lvdata))
1577 02c521e4 Iustin Pop
    elif not isinstance(lvdata, dict):
1578 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1579 02c521e4 Iustin Pop
    else:
1580 02c521e4 Iustin Pop
      nimg.volumes = lvdata
1581 02c521e4 Iustin Pop
      nimg.lvm_fail = False
1582 02c521e4 Iustin Pop
1583 02c521e4 Iustin Pop
  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1584 02c521e4 Iustin Pop
    """Verifies and updates the node instance list.
1585 02c521e4 Iustin Pop

1586 02c521e4 Iustin Pop
    If the listing was successful, then updates this node's instance
1587 02c521e4 Iustin Pop
    list. Otherwise, it marks the RPC call as failed for the instance
1588 02c521e4 Iustin Pop
    list key.
1589 02c521e4 Iustin Pop

1590 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1591 02c521e4 Iustin Pop
    @param ninfo: the node to check
1592 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1593 02c521e4 Iustin Pop
    @param nimg: the node image object
1594 02c521e4 Iustin Pop

1595 02c521e4 Iustin Pop
    """
1596 02c521e4 Iustin Pop
    idata = nresult.get(constants.NV_INSTANCELIST, None)
1597 02c521e4 Iustin Pop
    test = not isinstance(idata, list)
1598 02c521e4 Iustin Pop
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1599 02c521e4 Iustin Pop
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
1600 02c521e4 Iustin Pop
    if test:
1601 02c521e4 Iustin Pop
      nimg.hyp_fail = True
1602 02c521e4 Iustin Pop
    else:
1603 02c521e4 Iustin Pop
      nimg.instances = idata
1604 02c521e4 Iustin Pop
1605 02c521e4 Iustin Pop
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1606 02c521e4 Iustin Pop
    """Verifies and computes a node information map
1607 02c521e4 Iustin Pop

1608 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1609 02c521e4 Iustin Pop
    @param ninfo: the node to check
1610 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1611 02c521e4 Iustin Pop
    @param nimg: the node image object
1612 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1613 02c521e4 Iustin Pop

1614 02c521e4 Iustin Pop
    """
1615 02c521e4 Iustin Pop
    node = ninfo.name
1616 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1617 02c521e4 Iustin Pop
1618 02c521e4 Iustin Pop
    # try to read free memory (from the hypervisor)
1619 02c521e4 Iustin Pop
    hv_info = nresult.get(constants.NV_HVINFO, None)
1620 02c521e4 Iustin Pop
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1621 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1622 02c521e4 Iustin Pop
    if not test:
1623 02c521e4 Iustin Pop
      try:
1624 02c521e4 Iustin Pop
        nimg.mfree = int(hv_info["memory_free"])
1625 02c521e4 Iustin Pop
      except (ValueError, TypeError):
1626 02c521e4 Iustin Pop
        _ErrorIf(True, self.ENODERPC, node,
1627 02c521e4 Iustin Pop
                 "node returned invalid nodeinfo, check hypervisor")
1628 02c521e4 Iustin Pop
1629 02c521e4 Iustin Pop
    # FIXME: devise a free space model for file based instances as well
1630 02c521e4 Iustin Pop
    if vg_name is not None:
1631 02c521e4 Iustin Pop
      test = (constants.NV_VGLIST not in nresult or
1632 02c521e4 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST])
1633 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODELVM, node,
1634 02c521e4 Iustin Pop
               "node didn't return data for the volume group '%s'"
1635 02c521e4 Iustin Pop
               " - it is either missing or broken", vg_name)
1636 02c521e4 Iustin Pop
      if not test:
1637 02c521e4 Iustin Pop
        try:
1638 02c521e4 Iustin Pop
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1639 02c521e4 Iustin Pop
        except (ValueError, TypeError):
1640 02c521e4 Iustin Pop
          _ErrorIf(True, self.ENODERPC, node,
1641 02c521e4 Iustin Pop
                   "node returned invalid LVM info, check LVM status")
1642 02c521e4 Iustin Pop
1643 a8083063 Iustin Pop
  def CheckPrereq(self):
1644 a8083063 Iustin Pop
    """Check prerequisites.
1645 a8083063 Iustin Pop

1646 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
1647 e54c4c5e Guido Trotter
    all its members are valid.
1648 a8083063 Iustin Pop

1649 a8083063 Iustin Pop
    """
1650 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
1651 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
1652 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid checks to be skipped specified",
1653 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
1654 a8083063 Iustin Pop
1655 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
1656 d8fff41c Guido Trotter
    """Build hooks env.
1657 d8fff41c Guido Trotter

1658 5bbd3f7f Michael Hanselmann
    Cluster-Verify hooks run only in the post phase; their failure causes the
1659 d8fff41c Guido Trotter
    output to be logged in the verify output and the verification to fail.
1660 d8fff41c Guido Trotter

1661 d8fff41c Guido Trotter
    """
1662 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
1663 35e994e9 Iustin Pop
    env = {
1664 35e994e9 Iustin Pop
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1665 35e994e9 Iustin Pop
      }
1666 35e994e9 Iustin Pop
    for node in self.cfg.GetAllNodesInfo().values():
1667 35e994e9 Iustin Pop
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1668 35e994e9 Iustin Pop
1669 d8fff41c Guido Trotter
    return env, [], all_nodes
1670 d8fff41c Guido Trotter
1671 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1672 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
1673 a8083063 Iustin Pop

1674 a8083063 Iustin Pop
    """
1675 a0c9776a Iustin Pop
    self.bad = False
1676 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1677 7c874ee1 Iustin Pop
    verbose = self.op.verbose
1678 7c874ee1 Iustin Pop
    self._feedback_fn = feedback_fn
1679 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
1680 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
1681 a0c9776a Iustin Pop
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
1682 a8083063 Iustin Pop
1683 b98bf262 Michael Hanselmann
    # Check the cluster certificates
1684 b98bf262 Michael Hanselmann
    for cert_filename in constants.ALL_CERT_FILES:
1685 b98bf262 Michael Hanselmann
      (errcode, msg) = _VerifyCertificate(cert_filename)
1686 b98bf262 Michael Hanselmann
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
1687 b98bf262 Michael Hanselmann
1688 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
1689 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
1690 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
1691 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
1692 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
1693 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
1694 6d2e83d5 Iustin Pop
                        for iname in instancelist)
1695 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
1696 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
1697 02c521e4 Iustin Pop
    n_offline = 0 # Count of offline nodes
1698 02c521e4 Iustin Pop
    n_drained = 0 # Count of nodes being drained
1699 02c521e4 Iustin Pop
    node_vol_should = {}
1700 a8083063 Iustin Pop
1701 a8083063 Iustin Pop
    # FIXME: verify OS list
1702 a8083063 Iustin Pop
    # do local checksums
1703 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1704 112f18a5 Iustin Pop
1705 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1706 d3100055 Michael Hanselmann
    file_names.extend(constants.ALL_CERT_FILES)
1707 112f18a5 Iustin Pop
    file_names.extend(master_files)
1708 112f18a5 Iustin Pop
1709 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1710 a8083063 Iustin Pop
1711 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1712 a8083063 Iustin Pop
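    # build the per-node verification request; each NV_* key selects a check
    # to be run by the remote node_verify call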
    node_verify_param = {
1713 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1714 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1715 82e37788 Iustin Pop
                              if not node.offline],
1716 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1717 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1718 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1719 82e37788 Iustin Pop
                                 if not node.offline],
1720 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1721 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1722 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1723 7c0aa8e9 Iustin Pop
      constants.NV_NODESETUP: None,
1724 313b2dd4 Michael Hanselmann
      constants.NV_TIME: None,
1725 a8083063 Iustin Pop
      }
1726 313b2dd4 Michael Hanselmann
1727 cc9e1230 Guido Trotter
    if vg_name is not None:
1728 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1729 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1730 d091393e Iustin Pop
      node_verify_param[constants.NV_PVLIST] = [vg_name]
1731 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1732 313b2dd4 Michael Hanselmann
1733 02c521e4 Iustin Pop
    # Build our expected cluster state
1734 02c521e4 Iustin Pop
    node_image = dict((node.name, self.NodeImage(offline=node.offline))
1735 02c521e4 Iustin Pop
                      for node in nodeinfo)
1736 02c521e4 Iustin Pop
1737 02c521e4 Iustin Pop
    for instance in instancelist:
1738 02c521e4 Iustin Pop
      inst_config = instanceinfo[instance]
1739 02c521e4 Iustin Pop
1740 02c521e4 Iustin Pop
      for nname in inst_config.all_nodes:
1741 02c521e4 Iustin Pop
        if nname not in node_image:
1742 02c521e4 Iustin Pop
          # ghost node
1743 02c521e4 Iustin Pop
          gnode = self.NodeImage()
1744 02c521e4 Iustin Pop
          gnode.ghost = True
1745 02c521e4 Iustin Pop
          node_image[nname] = gnode
1746 02c521e4 Iustin Pop
1747 02c521e4 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1748 02c521e4 Iustin Pop
1749 02c521e4 Iustin Pop
      pnode = inst_config.primary_node
1750 02c521e4 Iustin Pop
      node_image[pnode].pinst.append(instance)
1751 02c521e4 Iustin Pop
1752 02c521e4 Iustin Pop
      for snode in inst_config.secondary_nodes:
1753 02c521e4 Iustin Pop
        nimg = node_image[snode]
1754 02c521e4 Iustin Pop
        nimg.sinst.append(instance)
1755 02c521e4 Iustin Pop
        if pnode not in nimg.sbp:
1756 02c521e4 Iustin Pop
          nimg.sbp[pnode] = []
1757 02c521e4 Iustin Pop
        nimg.sbp[pnode].append(instance)
1758 02c521e4 Iustin Pop
1759 02c521e4 Iustin Pop
    # At this point, we have the in-memory data structures complete,
1760 02c521e4 Iustin Pop
    # except for the runtime information, which we'll gather next
1761 02c521e4 Iustin Pop
1762 313b2dd4 Michael Hanselmann
    # Due to the way our RPC system works, exact response times cannot be
1763 313b2dd4 Michael Hanselmann
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
1764 313b2dd4 Michael Hanselmann
    # time before and after executing the request, we can at least have a time
1765 313b2dd4 Michael Hanselmann
    # window.
1766 313b2dd4 Michael Hanselmann
    nvinfo_starttime = time.time()
1767 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1768 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1769 313b2dd4 Michael Hanselmann
    nvinfo_endtime = time.time()
1770 a8083063 Iustin Pop
1771 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1772 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1773 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1774 6d2e83d5 Iustin Pop
1775 7c874ee1 Iustin Pop
    feedback_fn("* Verifying node status")
1776 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1777 112f18a5 Iustin Pop
      node = node_i.name
1778 02c521e4 Iustin Pop
      nimg = node_image[node]
1779 25361b9a Iustin Pop
1780 0a66c968 Iustin Pop
      if node_i.offline:
1781 7c874ee1 Iustin Pop
        if verbose:
1782 7c874ee1 Iustin Pop
          feedback_fn("* Skipping offline node %s" % (node,))
1783 02c521e4 Iustin Pop
        n_offline += 1
1784 0a66c968 Iustin Pop
        continue
1785 0a66c968 Iustin Pop
1786 112f18a5 Iustin Pop
      if node == master_node:
1787 25361b9a Iustin Pop
        ntype = "master"
1788 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1789 25361b9a Iustin Pop
        ntype = "master candidate"
1790 22f0f71d Iustin Pop
      elif node_i.drained:
1791 22f0f71d Iustin Pop
        ntype = "drained"
1792 02c521e4 Iustin Pop
        n_drained += 1
1793 112f18a5 Iustin Pop
      else:
1794 25361b9a Iustin Pop
        ntype = "regular"
1795 7c874ee1 Iustin Pop
      if verbose:
1796 7c874ee1 Iustin Pop
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1797 25361b9a Iustin Pop
1798 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1799 a0c9776a Iustin Pop
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
1800 6f68a739 Iustin Pop
      if msg:
1801 02c521e4 Iustin Pop
        nimg.rpc_fail = True
1802 25361b9a Iustin Pop
        continue
1803 25361b9a Iustin Pop
1804 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1805 a8083063 Iustin Pop
1806 02c521e4 Iustin Pop
      nimg.call_ok = self._VerifyNode(node_i, nresult)
1807 02c521e4 Iustin Pop
      self._VerifyNodeNetwork(node_i, nresult)
1808 02c521e4 Iustin Pop
      self._VerifyNodeLVM(node_i, nresult, vg_name)
1809 02c521e4 Iustin Pop
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
1810 02c521e4 Iustin Pop
                            master_files)
1811 02c521e4 Iustin Pop
      self._VerifyNodeDrbd(node_i, nresult, instanceinfo, all_drbd_map)
1812 02c521e4 Iustin Pop
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
1813 a8083063 Iustin Pop
1814 02c521e4 Iustin Pop
      self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
1815 02c521e4 Iustin Pop
      self._UpdateNodeInstances(node_i, nresult, nimg)
1816 02c521e4 Iustin Pop
      self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
1817 a8083063 Iustin Pop
1818 7c874ee1 Iustin Pop
    feedback_fn("* Verifying instance status")
1819 a8083063 Iustin Pop
    for instance in instancelist:
1820 7c874ee1 Iustin Pop
      if verbose:
1821 7c874ee1 Iustin Pop
        feedback_fn("* Verifying instance %s" % instance)
1822 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1823 02c521e4 Iustin Pop
      self._VerifyInstance(instance, inst_config, node_image)
1824 832261fd Iustin Pop
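      # remember which of this instance's nodes are offline; reported below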
      inst_nodes_offline = []
1825 a8083063 Iustin Pop
1826 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1827 02c521e4 Iustin Pop
      pnode_img = node_image[pnode]
1828 02c521e4 Iustin Pop
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
1829 a0c9776a Iustin Pop
               self.ENODERPC, pnode, "instance %s, connection to"
1830 a0c9776a Iustin Pop
               " primary node failed", instance)
1831 93e4c50b Guido Trotter
1832 02c521e4 Iustin Pop
      if pnode_img.offline:
1833 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1834 832261fd Iustin Pop
1835 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1836 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1837 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1838 93e4c50b Guido Trotter
      # supported either.
1839 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1840 02c521e4 Iustin Pop
      if not inst_config.secondary_nodes:
1841 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1842 02c521e4 Iustin Pop
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
1843 02c521e4 Iustin Pop
               instance, "instance has multiple secondary nodes: %s",
1844 02c521e4 Iustin Pop
               utils.CommaJoin(inst_config.secondary_nodes),
1845 02c521e4 Iustin Pop
               code=self.ETYPE_WARNING)
1846 93e4c50b Guido Trotter
1847 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1848 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1849 3924700f Iustin Pop
1850 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1851 02c521e4 Iustin Pop
        s_img = node_image[snode]
1852 02c521e4 Iustin Pop
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
1853 02c521e4 Iustin Pop
                 "instance %s, connection to secondary node failed", instance)
1854 02c521e4 Iustin Pop
1855 02c521e4 Iustin Pop
        if s_img.offline:
1856 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1857 832261fd Iustin Pop
1858 a0c9776a Iustin Pop
      # warn that the instance lives on offline nodes
1859 a0c9776a Iustin Pop
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
1860 a0c9776a Iustin Pop
               "instance lives on offline node(s) %s",
1861 1f864b60 Iustin Pop
               utils.CommaJoin(inst_nodes_offline))
1862 02c521e4 Iustin Pop
      # ... or ghost nodes
1863 02c521e4 Iustin Pop
      for node in inst_config.all_nodes:
1864 02c521e4 Iustin Pop
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
1865 02c521e4 Iustin Pop
                 "instance lives on ghost node %s", node)
1866 93e4c50b Guido Trotter
1867 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1868 02c521e4 Iustin Pop
    self._VerifyOrphanVolumes(node_vol_should, node_image)
1869 a8083063 Iustin Pop
1870 02c521e4 Iustin Pop
    feedback_fn("* Verifying orphan instances")
1871 02c521e4 Iustin Pop
    self._VerifyOrphanInstances(instancelist, node_image)
1872 a8083063 Iustin Pop
1873 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1874 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1875 02c521e4 Iustin Pop
      self._VerifyNPlusOneMemory(node_image, instanceinfo)
1876 2b3b6ddd Guido Trotter
1877 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1878 2b3b6ddd Guido Trotter
    if i_non_redundant:
1879 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1880 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1881 2b3b6ddd Guido Trotter
1882 3924700f Iustin Pop
    if i_non_a_balanced:
1883 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1884 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1885 3924700f Iustin Pop
1886 0a66c968 Iustin Pop
    if n_offline:
1887 02c521e4 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
1888 0a66c968 Iustin Pop
1889 22f0f71d Iustin Pop
    if n_drained:
1890 02c521e4 Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
1891 22f0f71d Iustin Pop
1892 a0c9776a Iustin Pop
    return not self.bad
1893 a8083063 Iustin Pop
1894 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1895 5bbd3f7f Michael Hanselmann
    """Analyze the post-hooks' result
1896 e4376078 Iustin Pop

1897 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
1898 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
1899 d8fff41c Guido Trotter

1900 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1901 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1902 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
1903 e4376078 Iustin Pop
    @param feedback_fn: function used send feedback back to the caller
1904 e4376078 Iustin Pop
    @param lu_result: previous Exec result
1905 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
1906 e4376078 Iustin Pop
        and hook results
1907 d8fff41c Guido Trotter

1908 d8fff41c Guido Trotter
    """
1909 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
1910 38206f3c Iustin Pop
    # their results
1911 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
1912 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
1913 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
1914 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
1915 7c874ee1 Iustin Pop
      assert hooks_results, "invalid result from hooks"
1916 7c874ee1 Iustin Pop
1917 7c874ee1 Iustin Pop
      for node_name in hooks_results:
1918 7c874ee1 Iustin Pop
        res = hooks_results[node_name]
1919 7c874ee1 Iustin Pop
        msg = res.fail_msg
1920 a0c9776a Iustin Pop
        test = msg and not res.offline
1921 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
1922 7c874ee1 Iustin Pop
                      "Communication failure in hooks execution: %s", msg)
1923 dd9e9f9c Michael Hanselmann
        if res.offline or msg:
1924 dd9e9f9c Michael Hanselmann
          # No need to investigate payload if node is offline or gave an error.
1925 a0c9776a Iustin Pop
          # override manually lu_result here as _ErrorIf only
1926 a0c9776a Iustin Pop
          # overrides self.bad
1927 7c874ee1 Iustin Pop
          lu_result = 1
1928 7c874ee1 Iustin Pop
          continue
1929 7c874ee1 Iustin Pop
        for script, hkr, output in res.payload:
1930 a0c9776a Iustin Pop
          test = hkr == constants.HKR_FAIL
1931 a0c9776a Iustin Pop
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
1932 7c874ee1 Iustin Pop
                        "Script %s failed, output:", script)
1933 a0c9776a Iustin Pop
          if test:
1934 7c874ee1 Iustin Pop
            output = indent_re.sub('      ', output)
1935 7c874ee1 Iustin Pop
            feedback_fn("%s" % output)
1936 6d7b472a Iustin Pop
            lu_result = 0
1937 d8fff41c Guido Trotter
1938 d8fff41c Guido Trotter
      return lu_result
1939 d8fff41c Guido Trotter
1940 a8083063 Iustin Pop
1941 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1942 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1943 2c95a8d4 Iustin Pop

1944 2c95a8d4 Iustin Pop
  """
1945 2c95a8d4 Iustin Pop
  _OP_REQP = []
1946 d4b9d97f Guido Trotter
  REQ_BGL = False
1947 d4b9d97f Guido Trotter
1948 d4b9d97f Guido Trotter
  def ExpandNames(self):
1949 d4b9d97f Guido Trotter
    self.needed_locks = {
1950 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1951 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1952 d4b9d97f Guido Trotter
    }
1953 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1954 2c95a8d4 Iustin Pop
1955 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1956 2c95a8d4 Iustin Pop
    """Check prerequisites.
1957 2c95a8d4 Iustin Pop

1958 2c95a8d4 Iustin Pop
    This has no prerequisites.
1959 2c95a8d4 Iustin Pop

1960 2c95a8d4 Iustin Pop
    """
1961 2c95a8d4 Iustin Pop
    pass
1962 2c95a8d4 Iustin Pop
1963 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1964 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1965 2c95a8d4 Iustin Pop

1966 29d376ec Iustin Pop
    @rtype: tuple of three items
1967 29d376ec Iustin Pop
    @return: a tuple of (dict of node-to-node_error, list of instances
1968 29d376ec Iustin Pop
        which need activate-disks, dict of instance: (node, volume) for
1969 29d376ec Iustin Pop
        missing volumes)
1970 29d376ec Iustin Pop

1971 2c95a8d4 Iustin Pop
    """
1972 29d376ec Iustin Pop
    result = res_nodes, res_instances, res_missing = {}, [], {}
1973 2c95a8d4 Iustin Pop
1974 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1975 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1976 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1977 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1978 2c95a8d4 Iustin Pop
1979 2c95a8d4 Iustin Pop
    nv_dict = {}
1980 2c95a8d4 Iustin Pop
    for inst in instances:
1981 2c95a8d4 Iustin Pop
      inst_lvs = {}
1982 0d68c45d Iustin Pop
      if (not inst.admin_up or
1983 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1984 2c95a8d4 Iustin Pop
        continue
1985 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1986 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1987 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1988 2c95a8d4 Iustin Pop
        for vol in vol_list:
1989 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
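    # nv_dict now maps (node_name, lv_name) pairs to instance objects, e.g.
    # ("node1.example.com", "xenvg/disk0_data") -> <Instance inst1>; the
    # names above are purely illustrative.  Each LV reported by a node below
    # can thus be traced back to its owning instance with a single lookup.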
1990 2c95a8d4 Iustin Pop
1991 2c95a8d4 Iustin Pop
    if not nv_dict:
1992 2c95a8d4 Iustin Pop
      return result
1993 2c95a8d4 Iustin Pop
1994 b2a6ccd4 Iustin Pop
    node_lvs = self.rpc.call_lv_list(nodes, vg_name)
1995 2c95a8d4 Iustin Pop
1996 2c95a8d4 Iustin Pop
    for node in nodes:
1997 2c95a8d4 Iustin Pop
      # node_volume
1998 29d376ec Iustin Pop
      node_res = node_lvs[node]
1999 29d376ec Iustin Pop
      if node_res.offline:
2000 ea9ddc07 Iustin Pop
        continue
2001 4c4e4e1e Iustin Pop
      msg = node_res.fail_msg
2002 29d376ec Iustin Pop
      if msg:
2003 29d376ec Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2004 29d376ec Iustin Pop
        res_nodes[node] = msg
2005 2c95a8d4 Iustin Pop
        continue
2006 2c95a8d4 Iustin Pop
2007 29d376ec Iustin Pop
      lvs = node_res.payload
2008 1122eb25 Iustin Pop
      for lv_name, (_, _, lv_online) in lvs.items():
2009 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
2010 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
2011 b63ed789 Iustin Pop
            and inst.name not in res_instances):
2012 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
2013 2c95a8d4 Iustin Pop
2014 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
2015 b63ed789 Iustin Pop
    # data better
2016 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
2017 b63ed789 Iustin Pop
      if inst.name not in res_missing:
2018 b63ed789 Iustin Pop
        res_missing[inst.name] = []
2019 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
2020 b63ed789 Iustin Pop
2021 2c95a8d4 Iustin Pop
    return result
2022 2c95a8d4 Iustin Pop
2023 2c95a8d4 Iustin Pop
2024 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
2025 60975797 Iustin Pop
  """Verifies the cluster disks sizes.
2026 60975797 Iustin Pop

2027 60975797 Iustin Pop
  """
2028 60975797 Iustin Pop
  _OP_REQP = ["instances"]
2029 60975797 Iustin Pop
  REQ_BGL = False
2030 60975797 Iustin Pop
2031 60975797 Iustin Pop
  def ExpandNames(self):
2032 60975797 Iustin Pop
    if not isinstance(self.op.instances, list):
2033 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
2034 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2035 60975797 Iustin Pop
2036 60975797 Iustin Pop
    if self.op.instances:
2037 60975797 Iustin Pop
      self.wanted_names = []
2038 60975797 Iustin Pop
      for name in self.op.instances:
2039 cf26a87a Iustin Pop
        full_name = _ExpandInstanceName(self.cfg, name)
2040 60975797 Iustin Pop
        self.wanted_names.append(full_name)
2041 60975797 Iustin Pop
      self.needed_locks = {
2042 60975797 Iustin Pop
        locking.LEVEL_NODE: [],
2043 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: self.wanted_names,
2044 60975797 Iustin Pop
        }
2045 60975797 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
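      # the node locks themselves are filled in later by DeclareLocks, which
      # narrows them down to the primary nodes of the selected instances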
2046 60975797 Iustin Pop
    else:
2047 60975797 Iustin Pop
      self.wanted_names = None
2048 60975797 Iustin Pop
      self.needed_locks = {
2049 60975797 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
2050 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: locking.ALL_SET,
2051 60975797 Iustin Pop
        }
2052 60975797 Iustin Pop
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2053 60975797 Iustin Pop
2054 60975797 Iustin Pop
  def DeclareLocks(self, level):
2055 60975797 Iustin Pop
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
2056 60975797 Iustin Pop
      self._LockInstancesNodes(primary_only=True)
2057 60975797 Iustin Pop
2058 60975797 Iustin Pop
  def CheckPrereq(self):
2059 60975797 Iustin Pop
    """Check prerequisites.
2060 60975797 Iustin Pop

2061 60975797 Iustin Pop
    This only checks the optional instance list against the existing names.
2062 60975797 Iustin Pop

2063 60975797 Iustin Pop
    """
2064 60975797 Iustin Pop
    if self.wanted_names is None:
2065 60975797 Iustin Pop
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2066 60975797 Iustin Pop
2067 60975797 Iustin Pop
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2068 60975797 Iustin Pop
                             in self.wanted_names]
2069 60975797 Iustin Pop
2070 b775c337 Iustin Pop
  def _EnsureChildSizes(self, disk):
2071 b775c337 Iustin Pop
    """Ensure children of the disk have the needed disk size.
2072 b775c337 Iustin Pop

2073 b775c337 Iustin Pop
    This is valid mainly for DRBD8 and fixes an issue where the
2074 b775c337 Iustin Pop
    children have smaller disk size.
2075 b775c337 Iustin Pop

2076 b775c337 Iustin Pop
    @param disk: an L{ganeti.objects.Disk} object
2077 b775c337 Iustin Pop

2078 b775c337 Iustin Pop
    """
2079 b775c337 Iustin Pop
    if disk.dev_type == constants.LD_DRBD8:
2080 b775c337 Iustin Pop
      assert disk.children, "Empty children for DRBD8?"
2081 b775c337 Iustin Pop
      fchild = disk.children[0]
2082 b775c337 Iustin Pop
      mismatch = fchild.size < disk.size
2083 b775c337 Iustin Pop
      if mismatch:
2084 b775c337 Iustin Pop
        self.LogInfo("Child disk has size %d, parent %d, fixing",
2085 b775c337 Iustin Pop
                     fchild.size, disk.size)
2086 b775c337 Iustin Pop
        fchild.size = disk.size
2087 b775c337 Iustin Pop
2088 b775c337 Iustin Pop
      # and we recurse on this child only, not on the metadev
2089 b775c337 Iustin Pop
      return self._EnsureChildSizes(fchild) or mismatch
2090 b775c337 Iustin Pop
    else:
2091 b775c337 Iustin Pop
      return False
2092 b775c337 Iustin Pop
2093 60975797 Iustin Pop
  def Exec(self, feedback_fn):
2094 60975797 Iustin Pop
    """Verify the size of cluster disks.
2095 60975797 Iustin Pop

2096 60975797 Iustin Pop
    """
2097 60975797 Iustin Pop
    # TODO: check child disks too
2098 60975797 Iustin Pop
    # TODO: check differences in size between primary/secondary nodes
2099 60975797 Iustin Pop
    per_node_disks = {}
2100 60975797 Iustin Pop
    for instance in self.wanted_instances:
2101 60975797 Iustin Pop
      pnode = instance.primary_node
2102 60975797 Iustin Pop
      if pnode not in per_node_disks:
2103 60975797 Iustin Pop
        per_node_disks[pnode] = []
2104 60975797 Iustin Pop
      for idx, disk in enumerate(instance.disks):
2105 60975797 Iustin Pop
        per_node_disks[pnode].append((instance, idx, disk))
2106 60975797 Iustin Pop
2107 60975797 Iustin Pop
    changed = []
2108 60975797 Iustin Pop
    for node, dskl in per_node_disks.items():
2109 4d9e6835 Iustin Pop
      newl = [v[2].Copy() for v in dskl]
2110 4d9e6835 Iustin Pop
      for dsk in newl:
2111 4d9e6835 Iustin Pop
        self.cfg.SetDiskID(dsk, node)
2112 4d9e6835 Iustin Pop
      result = self.rpc.call_blockdev_getsizes(node, newl)
2113 3cebe102 Michael Hanselmann
      if result.fail_msg:
2114 60975797 Iustin Pop
        self.LogWarning("Failure in blockdev_getsizes call to node"
2115 60975797 Iustin Pop
                        " %s, ignoring", node)
2116 60975797 Iustin Pop
        continue
2117 60975797 Iustin Pop
      if len(result.data) != len(dskl):
2118 60975797 Iustin Pop
        self.LogWarning("Invalid result from node %s, ignoring node results",
2119 60975797 Iustin Pop
                        node)
2120 60975797 Iustin Pop
        continue
2121 60975797 Iustin Pop
      for ((instance, idx, disk), size) in zip(dskl, result.data):
2122 60975797 Iustin Pop
        if size is None:
2123 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return size"
2124 60975797 Iustin Pop
                          " information, ignoring", idx, instance.name)
2125 60975797 Iustin Pop
          continue
2126 60975797 Iustin Pop
        if not isinstance(size, (int, long)):
2127 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return valid"
2128 60975797 Iustin Pop
                          " size information, ignoring", idx, instance.name)
2129 60975797 Iustin Pop
          continue
2130 60975797 Iustin Pop
        size = size >> 20
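        # the node reports the device size in bytes; shifting by 20 bits
        # converts it to MiB, the unit in which disk.size is recorded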
2131 60975797 Iustin Pop
        if size != disk.size:
2132 60975797 Iustin Pop
          self.LogInfo("Disk %d of instance %s has mismatched size,"
2133 60975797 Iustin Pop
                       " correcting: recorded %d, actual %d", idx,
2134 60975797 Iustin Pop
                       instance.name, disk.size, size)
2135 60975797 Iustin Pop
          disk.size = size
2136 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
2137 60975797 Iustin Pop
          changed.append((instance.name, idx, size))
2138 b775c337 Iustin Pop
        if self._EnsureChildSizes(disk):
2139 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
2140 b775c337 Iustin Pop
          changed.append((instance.name, idx, disk.size))
2141 60975797 Iustin Pop
    return changed
2142 60975797 Iustin Pop
2143 60975797 Iustin Pop
2144 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
2145 07bd8a51 Iustin Pop
  """Rename the cluster.
2146 07bd8a51 Iustin Pop

2147 07bd8a51 Iustin Pop
  """
2148 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
2149 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
2150 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
2151 07bd8a51 Iustin Pop
2152 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
2153 07bd8a51 Iustin Pop
    """Build hooks env.
2154 07bd8a51 Iustin Pop

2155 07bd8a51 Iustin Pop
    """
2156 07bd8a51 Iustin Pop
    env = {
2157 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
2158 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
2159 07bd8a51 Iustin Pop
      }
2160 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
2161 47a72f18 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2162 47a72f18 Iustin Pop
    return env, [mn], all_nodes
2163 07bd8a51 Iustin Pop
2164 07bd8a51 Iustin Pop
  def CheckPrereq(self):
2165 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
2166 07bd8a51 Iustin Pop

2167 07bd8a51 Iustin Pop
    """
2168 104f4ca1 Iustin Pop
    hostname = utils.GetHostInfo(self.op.name)
2169 07bd8a51 Iustin Pop
2170 bcf043c9 Iustin Pop
    new_name = hostname.name
2171 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
2172 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
2173 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
2174 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
2175 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
2176 5c983ee5 Iustin Pop
                                 " cluster has changed",
2177 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2178 07bd8a51 Iustin Pop
    if new_ip != old_ip:
2179 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2180 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
2181 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
2182 5c983ee5 Iustin Pop
                                   new_ip, errors.ECODE_NOTUNIQUE)
2183 07bd8a51 Iustin Pop
2184 07bd8a51 Iustin Pop
    self.op.name = new_name
2185 07bd8a51 Iustin Pop
2186 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
2187 07bd8a51 Iustin Pop
    """Rename the cluster.
2188 07bd8a51 Iustin Pop

2189 07bd8a51 Iustin Pop
    """
2190 07bd8a51 Iustin Pop
    clustername = self.op.name
2191 07bd8a51 Iustin Pop
    ip = self.ip
2192 07bd8a51 Iustin Pop
2193 07bd8a51 Iustin Pop
    # shutdown the master IP
2194 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
2195 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
2196 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
2197 07bd8a51 Iustin Pop
2198 07bd8a51 Iustin Pop
    try:
2199 55cf7d83 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
2200 55cf7d83 Iustin Pop
      cluster.cluster_name = clustername
2201 55cf7d83 Iustin Pop
      cluster.master_ip = ip
2202 a4eae71f Michael Hanselmann
      self.cfg.Update(cluster, feedback_fn)
2203 ec85e3d5 Iustin Pop
2204 ec85e3d5 Iustin Pop
      # update the known hosts file
2205 ec85e3d5 Iustin Pop
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2206 ec85e3d5 Iustin Pop
      node_list = self.cfg.GetNodeList()
2207 ec85e3d5 Iustin Pop
      try:
2208 ec85e3d5 Iustin Pop
        node_list.remove(master)
2209 ec85e3d5 Iustin Pop
      except ValueError:
2210 ec85e3d5 Iustin Pop
        pass
2211 ec85e3d5 Iustin Pop
      result = self.rpc.call_upload_file(node_list,
2212 ec85e3d5 Iustin Pop
                                         constants.SSH_KNOWN_HOSTS_FILE)
2213 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
2214 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2215 6f7d4e75 Iustin Pop
        if msg:
2216 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2217 6f7d4e75 Iustin Pop
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
2218 6f7d4e75 Iustin Pop
          self.proc.LogWarning(msg)
2219 ec85e3d5 Iustin Pop
2220 07bd8a51 Iustin Pop
    finally:
2221 3583908a Guido Trotter
      result = self.rpc.call_node_start_master(master, False, False)
2222 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2223 b726aff0 Iustin Pop
      if msg:
2224 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
2225 b726aff0 Iustin Pop
                        " the master, please restart manually: %s", msg)
2226 07bd8a51 Iustin Pop
2227 07bd8a51 Iustin Pop
2228 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
2229 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
2230 8084f9f6 Manuel Franceschini

2231 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
2232 e4376078 Iustin Pop
  @param disk: the disk to check
2233 5bbd3f7f Michael Hanselmann
  @rtype: boolean
2234 e4376078 Iustin Pop
  @return: boolean indicating whether an LD_LV dev_type was found or not
2235 8084f9f6 Manuel Franceschini

2236 8084f9f6 Manuel Franceschini
  """
2237 8084f9f6 Manuel Franceschini
  if disk.children:
2238 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
2239 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
2240 8084f9f6 Manuel Franceschini
        return True
2241 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
2242 8084f9f6 Manuel Franceschini
2243 8084f9f6 Manuel Franceschini
2244 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
2245 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
2246 8084f9f6 Manuel Franceschini

2247 8084f9f6 Manuel Franceschini
  """
2248 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
2249 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
2250 8084f9f6 Manuel Franceschini
  _OP_REQP = []
2251 c53279cf Guido Trotter
  REQ_BGL = False
2252 c53279cf Guido Trotter
2253 3994f455 Iustin Pop
  def CheckArguments(self):
2254 4b7735f9 Iustin Pop
    """Check parameters
2255 4b7735f9 Iustin Pop

2256 4b7735f9 Iustin Pop
    """
2257 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
2258 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
2259 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
2260 4b7735f9 Iustin Pop
      try:
2261 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
2262 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
2263 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
2264 5c983ee5 Iustin Pop
                                   str(err), errors.ECODE_INVAL)
2265 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
2266 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed",
2267 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2268 3953242f Iustin Pop
    _CheckBooleanOpField(self.op, "maintain_node_health")
2269 4b7735f9 Iustin Pop
2270 c53279cf Guido Trotter
  def ExpandNames(self):
2271 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
2272 c53279cf Guido Trotter
    # all nodes to be modified.
2273 c53279cf Guido Trotter
    self.needed_locks = {
2274 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
2275 c53279cf Guido Trotter
    }
2276 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2277 8084f9f6 Manuel Franceschini
2278 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
2279 8084f9f6 Manuel Franceschini
    """Build hooks env.
2280 8084f9f6 Manuel Franceschini

2281 8084f9f6 Manuel Franceschini
    """
2282 8084f9f6 Manuel Franceschini
    env = {
2283 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
2284 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
2285 8084f9f6 Manuel Franceschini
      }
2286 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
2287 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
2288 8084f9f6 Manuel Franceschini
2289 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
2290 8084f9f6 Manuel Franceschini
    """Check prerequisites.
2291 8084f9f6 Manuel Franceschini

2292 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
2293 5f83e263 Iustin Pop
    if the given volume group is valid.
2294 8084f9f6 Manuel Franceschini

2295 8084f9f6 Manuel Franceschini
    """
2296 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
2297 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
2298 8084f9f6 Manuel Franceschini
      for inst in instances:
2299 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
2300 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
2301 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
2302 5c983ee5 Iustin Pop
                                       " lvm-based instances exist",
2303 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
2304 8084f9f6 Manuel Franceschini
2305 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
2306 779c15bb Iustin Pop
2307 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
2308 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
2309 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
2310 8084f9f6 Manuel Franceschini
      for node in node_list:
2311 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
2312 e480923b Iustin Pop
        if msg:
2313 781de953 Iustin Pop
          # ignoring down node
2314 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
2315 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
2316 781de953 Iustin Pop
          continue
2317 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2318 781de953 Iustin Pop
                                              self.op.vg_name,
2319 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
2320 8084f9f6 Manuel Franceschini
        if vgstatus:
2321 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
2322 5c983ee5 Iustin Pop
                                     (node, vgstatus), errors.ECODE_ENVIRON)
2323 8084f9f6 Manuel Franceschini
2324 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
2325 5af3da74 Guido Trotter
    # validate params changes
2326 779c15bb Iustin Pop
    if self.op.beparams:
2327 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2328 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
2329 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
2330 779c15bb Iustin Pop
2331 5af3da74 Guido Trotter
    if self.op.nicparams:
2332 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2333 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
2334 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
2335 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
2336 90b704a1 Guido Trotter
      nic_errors = []
2337 90b704a1 Guido Trotter
2338 90b704a1 Guido Trotter
      # check all instances for consistency
2339 90b704a1 Guido Trotter
      for instance in self.cfg.GetAllInstancesInfo().values():
2340 90b704a1 Guido Trotter
        for nic_idx, nic in enumerate(instance.nics):
2341 90b704a1 Guido Trotter
          params_copy = copy.deepcopy(nic.nicparams)
2342 90b704a1 Guido Trotter
          params_filled = objects.FillDict(self.new_nicparams, params_copy)
2343 90b704a1 Guido Trotter
2344 90b704a1 Guido Trotter
          # check parameter syntax
2345 90b704a1 Guido Trotter
          try:
2346 90b704a1 Guido Trotter
            objects.NIC.CheckParameterSyntax(params_filled)
2347 90b704a1 Guido Trotter
          except errors.ConfigurationError, err:
2348 90b704a1 Guido Trotter
            nic_errors.append("Instance %s, nic/%d: %s" %
2349 90b704a1 Guido Trotter
                              (instance.name, nic_idx, err))
2350 90b704a1 Guido Trotter
2351 90b704a1 Guido Trotter
          # if we're moving instances to routed, check that they have an ip
2352 90b704a1 Guido Trotter
          target_mode = params_filled[constants.NIC_MODE]
2353 90b704a1 Guido Trotter
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2354 90b704a1 Guido Trotter
            nic_errors.append("Instance %s, nic/%d: routed nick with no ip" %
2355 90b704a1 Guido Trotter
                              (instance.name, nic_idx))
2356 90b704a1 Guido Trotter
      if nic_errors:
2357 90b704a1 Guido Trotter
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2358 90b704a1 Guido Trotter
                                   "\n".join(nic_errors))
2359 5af3da74 Guido Trotter
2360 779c15bb Iustin Pop
    # hypervisor list/parameters
2361 abe609b2 Guido Trotter
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
2362 779c15bb Iustin Pop
    if self.op.hvparams:
2363 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
2364 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input",
2365 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2366 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
2367 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
2368 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
2369 779c15bb Iustin Pop
        else:
2370 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
2371 779c15bb Iustin Pop
2372 17463d22 Renรฉ Nussbaumer
    # os hypervisor parameters
2373 17463d22 Renรฉ Nussbaumer
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2374 17463d22 Renรฉ Nussbaumer
    if self.op.os_hvp:
2375 17463d22 Renรฉ Nussbaumer
      if not isinstance(self.op.os_hvp, dict):
2376 17463d22 Renรฉ Nussbaumer
        raise errors.OpPrereqError("Invalid 'os_hvp' parameter on input",
2377 17463d22 Renรฉ Nussbaumer
                                   errors.ECODE_INVAL)
2378 17463d22 Renรฉ Nussbaumer
      for os_name, hvs in self.op.os_hvp.items():
2379 17463d22 Renรฉ Nussbaumer
        if not isinstance(hvs, dict):
2380 17463d22 Renรฉ Nussbaumer
          raise errors.OpPrereqError(("Invalid 'os_hvp' parameter on"
2381 17463d22 Renรฉ Nussbaumer
                                      " input"), errors.ECODE_INVAL)
2382 17463d22 Renรฉ Nussbaumer
        if os_name not in self.new_os_hvp:
2383 17463d22 Renรฉ Nussbaumer
          self.new_os_hvp[os_name] = hvs
2384 17463d22 Renรฉ Nussbaumer
        else:
2385 17463d22 Renรฉ Nussbaumer
          for hv_name, hv_dict in hvs.items():
2386 17463d22 Renรฉ Nussbaumer
            if hv_name not in self.new_os_hvp[os_name]:
2387 17463d22 Renรฉ Nussbaumer
              self.new_os_hvp[os_name][hv_name] = hv_dict
2388 17463d22 Renรฉ Nussbaumer
            else:
2389 17463d22 Renรฉ Nussbaumer
              self.new_os_hvp[os_name][hv_name].update(hv_dict)
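    # new_os_hvp now holds the existing per-OS hypervisor overrides with the
    # requested changes merged in, e.g. {"debian-lenny": {"xen-pvm": {...}}}
    # (the OS and hypervisor names here are only illustrative)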
2390 17463d22 Renรฉ Nussbaumer
2391 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
2392 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
2393 b119bccb Guido Trotter
      if not self.hv_list:
2394 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
2395 5c983ee5 Iustin Pop
                                   " least one member",
2396 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2397 b119bccb Guido Trotter
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
2398 b119bccb Guido Trotter
      if invalid_hvs:
2399 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
2400 ab3e6da8 Iustin Pop
                                   " entries: %s" %
2401 ab3e6da8 Iustin Pop
                                   utils.CommaJoin(invalid_hvs),
2402 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2403 779c15bb Iustin Pop
    else:
2404 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
2405 779c15bb Iustin Pop
2406 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
2407 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
2408 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
2409 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
2410 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
2411 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
2412 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
2413 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
2414 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2415 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
2416 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
2417 779c15bb Iustin Pop
2418 cced4c39 Iustin Pop
    if self.op.os_hvp:
2419 cced4c39 Iustin Pop
      # no need to check any newly-enabled hypervisors, since the
2420 cced4c39 Iustin Pop
      # defaults have already been checked in the above code-block
2421 cced4c39 Iustin Pop
      for os_name, os_hvp in self.new_os_hvp.items():
2422 cced4c39 Iustin Pop
        for hv_name, hv_params in os_hvp.items():
2423 cced4c39 Iustin Pop
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2424 cced4c39 Iustin Pop
          # we need to fill in the new os_hvp on top of the actual hv_p
2425 cced4c39 Iustin Pop
          cluster_defaults = self.new_hvparams.get(hv_name, {})
2426 cced4c39 Iustin Pop
          new_osp = objects.FillDict(cluster_defaults, hv_params)
2427 cced4c39 Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
2428 cced4c39 Iustin Pop
          hv_class.CheckParameterSyntax(new_osp)
2429 cced4c39 Iustin Pop
          _CheckHVParams(self, node_list, hv_name, new_osp)
2430 cced4c39 Iustin Pop
2431 cced4c39 Iustin Pop
2432 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
2433 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
2434 8084f9f6 Manuel Franceschini

2435 8084f9f6 Manuel Franceschini
    """
2436 779c15bb Iustin Pop
    if self.op.vg_name is not None:
2437 b2482333 Guido Trotter
      new_volume = self.op.vg_name
2438 b2482333 Guido Trotter
      if not new_volume:
2439 b2482333 Guido Trotter
        new_volume = None
2440 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
2441 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
2442 779c15bb Iustin Pop
      else:
2443 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
2444 779c15bb Iustin Pop
                    " state, not changing")
2445 779c15bb Iustin Pop
    if self.op.hvparams:
2446 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
2447 17463d22 Renรฉ Nussbaumer
    if self.op.os_hvp:
2448 17463d22 Renรฉ Nussbaumer
      self.cluster.os_hvp = self.new_os_hvp
2449 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
2450 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2451 779c15bb Iustin Pop
    if self.op.beparams:
2452 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2453 5af3da74 Guido Trotter
    if self.op.nicparams:
2454 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2455 5af3da74 Guido Trotter
2456 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
2457 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
2458 75e914fb Iustin Pop
      # we need to update the pool size here, otherwise the save will fail
2459 44485f49 Guido Trotter
      _AdjustCandidatePool(self, [])
2460 4b7735f9 Iustin Pop
2461 3953242f Iustin Pop
    if self.op.maintain_node_health is not None:
2462 3953242f Iustin Pop
      self.cluster.maintain_node_health = self.op.maintain_node_health
2463 3953242f Iustin Pop
2464 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cluster, feedback_fn)
2465 8084f9f6 Manuel Franceschini
2466 8084f9f6 Manuel Franceschini
2467 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
2468 28eddce5 Guido Trotter
  """Distribute additional files which are part of the cluster configuration.
2469 28eddce5 Guido Trotter

2470 28eddce5 Guido Trotter
  ConfigWriter takes care of distributing the config and ssconf files, but
2471 28eddce5 Guido Trotter
  there are more files which should be distributed to all nodes. This function
2472 28eddce5 Guido Trotter
  makes sure those are copied.
2473 28eddce5 Guido Trotter

2474 28eddce5 Guido Trotter
  @param lu: calling logical unit
2475 28eddce5 Guido Trotter
  @param additional_nodes: list of nodes not in the config to distribute to
2476 28eddce5 Guido Trotter

2477 28eddce5 Guido Trotter
  """
2478 28eddce5 Guido Trotter
  # 1. Gather target nodes
2479 28eddce5 Guido Trotter
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
2480 6819dc49 Iustin Pop
  dist_nodes = lu.cfg.GetOnlineNodeList()
2481 28eddce5 Guido Trotter
  if additional_nodes is not None:
2482 28eddce5 Guido Trotter
    dist_nodes.extend(additional_nodes)
2483 28eddce5 Guido Trotter
  if myself.name in dist_nodes:
2484 28eddce5 Guido Trotter
    dist_nodes.remove(myself.name)
2485 a4eae71f Michael Hanselmann
2486 28eddce5 Guido Trotter
  # 2. Gather files to distribute
2487 28eddce5 Guido Trotter
  dist_files = set([constants.ETC_HOSTS,
2488 28eddce5 Guido Trotter
                    constants.SSH_KNOWN_HOSTS_FILE,
2489 28eddce5 Guido Trotter
                    constants.RAPI_CERT_FILE,
2490 28eddce5 Guido Trotter
                    constants.RAPI_USERS_FILE,
2491 6b7d5878 Michael Hanselmann
                    constants.CONFD_HMAC_KEY,
2492 28eddce5 Guido Trotter
                   ])
2493 e1b8653f Guido Trotter
2494 e1b8653f Guido Trotter
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2495 e1b8653f Guido Trotter
  for hv_name in enabled_hypervisors:
2496 e1b8653f Guido Trotter
    hv_class = hypervisor.GetHypervisor(hv_name)
2497 e1b8653f Guido Trotter
    dist_files.update(hv_class.GetAncillaryFiles())
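  # each enabled hypervisor may declare additional ancillary files of its
  # own; they are added to the set so they get kept in sync on all nodes too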
2498 e1b8653f Guido Trotter
2499 28eddce5 Guido Trotter
  # 3. Perform the files upload
2500 28eddce5 Guido Trotter
  for fname in dist_files:
2501 28eddce5 Guido Trotter
    if os.path.exists(fname):
2502 28eddce5 Guido Trotter
      result = lu.rpc.call_upload_file(dist_nodes, fname)
2503 28eddce5 Guido Trotter
      for to_node, to_result in result.items():
2504 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2505 6f7d4e75 Iustin Pop
        if msg:
2506 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2507 6f7d4e75 Iustin Pop
                 (fname, to_node, msg))
2508 6f7d4e75 Iustin Pop
          lu.proc.LogWarning(msg)
2509 28eddce5 Guido Trotter
2510 28eddce5 Guido Trotter
2511 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
2512 afee0879 Iustin Pop
  """Force the redistribution of cluster configuration.
2513 afee0879 Iustin Pop

2514 afee0879 Iustin Pop
  This is a very simple LU.
2515 afee0879 Iustin Pop

2516 afee0879 Iustin Pop
  """
2517 afee0879 Iustin Pop
  _OP_REQP = []
2518 afee0879 Iustin Pop
  REQ_BGL = False
2519 afee0879 Iustin Pop
2520 afee0879 Iustin Pop
  def ExpandNames(self):
2521 afee0879 Iustin Pop
    self.needed_locks = {
2522 afee0879 Iustin Pop
      locking.LEVEL_NODE: locking.ALL_SET,
2523 afee0879 Iustin Pop
    }
2524 afee0879 Iustin Pop
    self.share_locks[locking.LEVEL_NODE] = 1
2525 afee0879 Iustin Pop
2526 afee0879 Iustin Pop
  def CheckPrereq(self):
2527 afee0879 Iustin Pop
    """Check prerequisites.
2528 afee0879 Iustin Pop

2529 afee0879 Iustin Pop
    """
2530 afee0879 Iustin Pop
2531 afee0879 Iustin Pop
  def Exec(self, feedback_fn):
2532 afee0879 Iustin Pop
    """Redistribute the configuration.
2533 afee0879 Iustin Pop

2534 afee0879 Iustin Pop
    """
2535 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
2536 28eddce5 Guido Trotter
    _RedistributeAncillaryFiles(self)
2537 afee0879 Iustin Pop
2538 afee0879 Iustin Pop
2539 b6c07b79 Michael Hanselmann
def _WaitForSync(lu, instance, oneshot=False):
2540 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
2541 a8083063 Iustin Pop

2542 a8083063 Iustin Pop
  """
2543 a8083063 Iustin Pop
  if not instance.disks:
2544 a8083063 Iustin Pop
    return True
2545 a8083063 Iustin Pop
2546 a8083063 Iustin Pop
  if not oneshot:
2547 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2548 a8083063 Iustin Pop
2549 a8083063 Iustin Pop
  node = instance.primary_node
2550 a8083063 Iustin Pop
2551 a8083063 Iustin Pop
  for dev in instance.disks:
2552 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
2553 a8083063 Iustin Pop
2554 6bcb1446 Michael Hanselmann
  # TODO: Convert to utils.Retry
2555 6bcb1446 Michael Hanselmann
2556 a8083063 Iustin Pop
  retries = 0
2557 fbafd7a8 Iustin Pop
  degr_retries = 10 # in seconds, as we sleep 1 second each time
2558 a8083063 Iustin Pop
  while True:
2559 a8083063 Iustin Pop
    max_time = 0
2560 a8083063 Iustin Pop
    done = True
2561 a8083063 Iustin Pop
    cumul_degraded = False
2562 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
2563 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2564 3efa9051 Iustin Pop
    if msg:
2565 3efa9051 Iustin Pop
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2566 a8083063 Iustin Pop
      retries += 1
2567 a8083063 Iustin Pop
      if retries >= 10:
2568 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
2569 3ecf6786 Iustin Pop
                                 " aborting." % node)
2570 a8083063 Iustin Pop
      time.sleep(6)
2571 a8083063 Iustin Pop
      continue
2572 3efa9051 Iustin Pop
    rstats = rstats.payload
2573 a8083063 Iustin Pop
    retries = 0
2574 1492cca7 Iustin Pop
    for i, mstat in enumerate(rstats):
2575 a8083063 Iustin Pop
      if mstat is None:
2576 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
2577 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
2578 a8083063 Iustin Pop
        continue
2579 36145b12 Michael Hanselmann
2580 36145b12 Michael Hanselmann
      cumul_degraded = (cumul_degraded or
2581 36145b12 Michael Hanselmann
                        (mstat.is_degraded and mstat.sync_percent is None))
2582 36145b12 Michael Hanselmann
      if mstat.sync_percent is not None:
2583 a8083063 Iustin Pop
        done = False
2584 36145b12 Michael Hanselmann
        if mstat.estimated_time is not None:
2585 36145b12 Michael Hanselmann
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
2586 36145b12 Michael Hanselmann
          max_time = mstat.estimated_time
2587 a8083063 Iustin Pop
        else:
2588 a8083063 Iustin Pop
          rem_time = "no time estimate"
2589 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
2590 4d4a651d Michael Hanselmann
                        (instance.disks[i].iv_name, mstat.sync_percent,
2591 4d4a651d Michael Hanselmann
                         rem_time))
2592 fbafd7a8 Iustin Pop
2593 fbafd7a8 Iustin Pop
    # if we're done but degraded, let's do a few small retries, to
2594 fbafd7a8 Iustin Pop
    # make sure we see a stable and not transient situation; therefore
2595 fbafd7a8 Iustin Pop
    # we force restart of the loop
2596 fbafd7a8 Iustin Pop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
2597 fbafd7a8 Iustin Pop
      logging.info("Degraded disks found, %d retries left", degr_retries)
2598 fbafd7a8 Iustin Pop
      degr_retries -= 1
2599 fbafd7a8 Iustin Pop
      time.sleep(1)
2600 fbafd7a8 Iustin Pop
      continue
2601 fbafd7a8 Iustin Pop
2602 a8083063 Iustin Pop
    if done or oneshot:
2603 a8083063 Iustin Pop
      break
2604 a8083063 Iustin Pop
2605 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
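    # poll again after the largest estimated sync time reported above, but
    # never wait more than a minute between checks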
2606 a8083063 Iustin Pop
2607 a8083063 Iustin Pop
  if done:
2608 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
2609 a8083063 Iustin Pop
  return not cumul_degraded
2610 a8083063 Iustin Pop
2611 a8083063 Iustin Pop
2612 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
2613 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
2614 a8083063 Iustin Pop

2615 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
2616 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
2617 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
2618 0834c866 Iustin Pop

2619 a8083063 Iustin Pop
  """
2620 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
2621 a8083063 Iustin Pop
2622 a8083063 Iustin Pop
  result = True
2623 96acbc09 Michael Hanselmann
2624 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
2625 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
2626 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2627 23829f6f Iustin Pop
    if msg:
2628 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
2629 23829f6f Iustin Pop
      result = False
2630 23829f6f Iustin Pop
    elif not rstats.payload:
2631 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
2632 a8083063 Iustin Pop
      result = False
2633 a8083063 Iustin Pop
    else:
2634 96acbc09 Michael Hanselmann
      if ldisk:
2635 f208978a Michael Hanselmann
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
2636 96acbc09 Michael Hanselmann
      else:
2637 96acbc09 Michael Hanselmann
        result = result and not rstats.payload.is_degraded
2638 96acbc09 Michael Hanselmann
2639 a8083063 Iustin Pop
  if dev.children:
2640 a8083063 Iustin Pop
    for child in dev.children:
2641 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
2642 a8083063 Iustin Pop
2643 a8083063 Iustin Pop
  return result
2644 a8083063 Iustin Pop
2645 a8083063 Iustin Pop
2646 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
2647 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
2648 a8083063 Iustin Pop

2649 a8083063 Iustin Pop
  """
2650 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2651 6bf01bbb Guido Trotter
  REQ_BGL = False
2652 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
2653 1e288a26 Guido Trotter
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
2654 1e288a26 Guido Trotter
  # Fields that need calculation of global os validity
2655 1e288a26 Guido Trotter
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])
2656 a8083063 Iustin Pop
2657 6bf01bbb Guido Trotter
  def ExpandNames(self):
2658 1f9430d6 Iustin Pop
    if self.op.names:
2659 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported",
2660 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2661 1f9430d6 Iustin Pop
2662 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2663 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2664 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
2665 1f9430d6 Iustin Pop
2666 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
2667 a6ab004b Iustin Pop
    # Temporary removal of locks, should be reverted later
2668 a6ab004b Iustin Pop
    # TODO: reintroduce locks when they are lighter-weight
2669 6bf01bbb Guido Trotter
    self.needed_locks = {}
2670 a6ab004b Iustin Pop
    #self.share_locks[locking.LEVEL_NODE] = 1
2671 a6ab004b Iustin Pop
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2672 6bf01bbb Guido Trotter
2673 6bf01bbb Guido Trotter
  def CheckPrereq(self):
2674 6bf01bbb Guido Trotter
    """Check prerequisites.
2675 6bf01bbb Guido Trotter

2676 6bf01bbb Guido Trotter
    """
2677 6bf01bbb Guido Trotter
2678 1f9430d6 Iustin Pop
  @staticmethod
2679 857121ad Iustin Pop
  def _DiagnoseByOS(rlist):
2680 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
2681 1f9430d6 Iustin Pop

2682 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
2683 1f9430d6 Iustin Pop

2684 e4376078 Iustin Pop
    @rtype: dict
2685 5fcc718f Iustin Pop
    @return: a dictionary with osnames as keys and as value another map, with
2686 255dcebd Iustin Pop
        nodes as keys and tuples of (path, status, diagnose, variants)
        as values, eg::
2687 e4376078 Iustin Pop

2688 255dcebd Iustin Pop
          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
2689 255dcebd Iustin Pop
                                     (/srv/..., False, "invalid api")],
2690 255dcebd Iustin Pop
                           "node2": [(/srv/..., True, "")]}
2691 e4376078 Iustin Pop
          }
2692 1f9430d6 Iustin Pop

2693 1f9430d6 Iustin Pop
    """
2694 1f9430d6 Iustin Pop
    all_os = {}
2695 a6ab004b Iustin Pop
    # we build here the list of nodes that didn't fail the RPC (at RPC
2696 a6ab004b Iustin Pop
    # level), so that nodes with a non-responding node daemon don't
2697 a6ab004b Iustin Pop
    # make all OSes invalid
2698 a6ab004b Iustin Pop
    good_nodes = [node_name for node_name in rlist
2699 4c4e4e1e Iustin Pop
                  if not rlist[node_name].fail_msg]
2700 83d92ad8 Iustin Pop
    for node_name, nr in rlist.items():
2701 4c4e4e1e Iustin Pop
      if nr.fail_msg or not nr.payload:
2702 1f9430d6 Iustin Pop
        continue
2703 ba00557a Guido Trotter
      for name, path, status, diagnose, variants in nr.payload:
2704 255dcebd Iustin Pop
        if name not in all_os:
2705 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
2706 1f9430d6 Iustin Pop
          # for each node in node_list
2707 255dcebd Iustin Pop
          all_os[name] = {}
2708 a6ab004b Iustin Pop
          for nname in good_nodes:
2709 255dcebd Iustin Pop
            all_os[name][nname] = []
2710 ba00557a Guido Trotter
        all_os[name][node_name].append((path, status, diagnose, variants))
2711 1f9430d6 Iustin Pop
    return all_os
2712 a8083063 Iustin Pop
2713 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2714 a8083063 Iustin Pop
    """Compute the list of OSes.
2715 a8083063 Iustin Pop

2716 a8083063 Iustin Pop
    """
2717 a6ab004b Iustin Pop
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
2718 94a02bb5 Iustin Pop
    node_data = self.rpc.call_os_diagnose(valid_nodes)
2719 857121ad Iustin Pop
    pol = self._DiagnoseByOS(node_data)
2720 1f9430d6 Iustin Pop
    output = []
2721 1e288a26 Guido Trotter
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
2722 1e288a26 Guido Trotter
    calc_variants = "variants" in self.op.output_fields
2723 1e288a26 Guido Trotter
2724 83d92ad8 Iustin Pop
    for os_name, os_data in pol.items():
2725 1f9430d6 Iustin Pop
      row = []
2726 1e288a26 Guido Trotter
      if calc_valid:
2727 1e288a26 Guido Trotter
        valid = True
2728 1e288a26 Guido Trotter
        variants = None
2729 1e288a26 Guido Trotter
        for osl in os_data.values():
2730 1e288a26 Guido Trotter
          valid = valid and osl and osl[0][1]
2731 1e288a26 Guido Trotter
          if not valid:
2732 1e288a26 Guido Trotter
            variants = None
2733 1e288a26 Guido Trotter
            break
2734 1e288a26 Guido Trotter
          if calc_variants:
2735 1e288a26 Guido Trotter
            node_variants = osl[0][3]
2736 1e288a26 Guido Trotter
            if variants is None:
2737 1e288a26 Guido Trotter
              variants = node_variants
2738 1e288a26 Guido Trotter
            else:
2739 1e288a26 Guido Trotter
              variants = [v for v in variants if v in node_variants]
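      # an OS is considered valid only if its first (preferred) entry is
      # valid on every node that reported it; the variant list is the
      # intersection of the variants offered by those first entries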
2740 1e288a26 Guido Trotter
2741 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
2742 1f9430d6 Iustin Pop
        if field == "name":
2743 1f9430d6 Iustin Pop
          val = os_name
2744 1f9430d6 Iustin Pop
        elif field == "valid":
2745 1e288a26 Guido Trotter
          val = valid
2746 1f9430d6 Iustin Pop
        elif field == "node_status":
2747 255dcebd Iustin Pop
          # this is just a copy of the dict
2748 1f9430d6 Iustin Pop
          val = {}
2749 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
2750 255dcebd Iustin Pop
            val[node_name] = nos_list
2751 1e288a26 Guido Trotter
        elif field == "variants":
2752 1e288a26 Guido Trotter
          val = variants
2753 1f9430d6 Iustin Pop
        else:
2754 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
2755 1f9430d6 Iustin Pop
        row.append(val)
2756 1f9430d6 Iustin Pop
      output.append(row)
2757 1f9430d6 Iustin Pop
2758 1f9430d6 Iustin Pop
    return output
2759 a8083063 Iustin Pop
2760 a8083063 Iustin Pop
2761 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
2762 a8083063 Iustin Pop
  """Logical unit for removing a node.
2763 a8083063 Iustin Pop

2764 a8083063 Iustin Pop
  """
2765 a8083063 Iustin Pop
  HPATH = "node-remove"
2766 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2767 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2768 a8083063 Iustin Pop
2769 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2770 a8083063 Iustin Pop
    """Build hooks env.
2771 a8083063 Iustin Pop

2772 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
2773 d08869ee Guido Trotter
    node would then be impossible to remove.
2774 a8083063 Iustin Pop

2775 a8083063 Iustin Pop
    """
2776 396e1b78 Michael Hanselmann
    env = {
2777 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2778 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
2779 396e1b78 Michael Hanselmann
      }
2780 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2781 9bb31ea8 Iustin Pop
    try:
2782 cd46f3b4 Luca Bigliardi
      all_nodes.remove(self.op.node_name)
2783 9bb31ea8 Iustin Pop
    except ValueError:
2784 9bb31ea8 Iustin Pop
      logging.warning("Node %s which is about to be removed not found"
2785 9bb31ea8 Iustin Pop
                      " in the all nodes list", self.op.node_name)
2786 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
2787 a8083063 Iustin Pop
2788 a8083063 Iustin Pop
  def CheckPrereq(self):
2789 a8083063 Iustin Pop
    """Check prerequisites.
2790 a8083063 Iustin Pop

2791 a8083063 Iustin Pop
    This checks:
2792 a8083063 Iustin Pop
     - the node exists in the configuration
2793 a8083063 Iustin Pop
     - it does not have primary or secondary instances
2794 a8083063 Iustin Pop
     - it's not the master
2795 a8083063 Iustin Pop

2796 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2797 a8083063 Iustin Pop

2798 a8083063 Iustin Pop
    """
2799 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
2800 cf26a87a Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.node_name)
2801 cf26a87a Iustin Pop
    assert node is not None
2802 a8083063 Iustin Pop
2803 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2804 a8083063 Iustin Pop
2805 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
2806 a8083063 Iustin Pop
    if node.name == masternode:
2807 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
2808 5c983ee5 Iustin Pop
                                 " you need to failover first.",
2809 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2810 a8083063 Iustin Pop
2811 a8083063 Iustin Pop
    for instance_name in instance_list:
2812 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
2813 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
2814 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
2815 5c983ee5 Iustin Pop
                                   " please remove first." % instance_name,
2816 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2817 a8083063 Iustin Pop
    self.op.node_name = node.name
2818 a8083063 Iustin Pop
    self.node = node
2819 a8083063 Iustin Pop
2820 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2821 a8083063 Iustin Pop
    """Removes the node from the cluster.
2822 a8083063 Iustin Pop

2823 a8083063 Iustin Pop
    """
2824 a8083063 Iustin Pop
    node = self.node
2825 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
2826 9a4f63d1 Iustin Pop
                 node.name)
2827 a8083063 Iustin Pop
2828 b989b9d9 Ken Wehr
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
2829 b989b9d9 Ken Wehr
2830 44485f49 Guido Trotter
    # Promote nodes to master candidate as needed
2831 44485f49 Guido Trotter
    _AdjustCandidatePool(self, exceptions=[node.name])
2832 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
2833 a8083063 Iustin Pop
2834 cd46f3b4 Luca Bigliardi
    # Run post hooks on the node before it's removed
2835 cd46f3b4 Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
2836 cd46f3b4 Luca Bigliardi
    try:
2837 1122eb25 Iustin Pop
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
2838 3cb5c1e3 Luca Bigliardi
    except:
2839 7260cfbe Iustin Pop
      # pylint: disable-msg=W0702
2840 3cb5c1e3 Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
2841 cd46f3b4 Luca Bigliardi
2842 b989b9d9 Ken Wehr
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
2843 4c4e4e1e Iustin Pop
    msg = result.fail_msg
2844 0623d351 Iustin Pop
    if msg:
2845 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
2846 0623d351 Iustin Pop
                      " the cluster: %s", msg)
2847 c8a0948f Michael Hanselmann
2848 a8083063 Iustin Pop
2849 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
2850 a8083063 Iustin Pop
  """Logical unit for querying nodes.
2851 a8083063 Iustin Pop

2852 a8083063 Iustin Pop
  """
2853 7260cfbe Iustin Pop
  # pylint: disable-msg=W0142
2854 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
2855 35705d8f Guido Trotter
  REQ_BGL = False
2856 19bed813 Iustin Pop
2857 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
2858 19bed813 Iustin Pop
                    "master_candidate", "offline", "drained"]
2859 19bed813 Iustin Pop
2860 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
2861 31bf511f Iustin Pop
    "dtotal", "dfree",
2862 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
2863 31bf511f Iustin Pop
    "bootid",
2864 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
2865 31bf511f Iustin Pop
    )
2866 31bf511f Iustin Pop
2867 19bed813 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*[
2868 19bed813 Iustin Pop
    "pinst_cnt", "sinst_cnt",
2869 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
2870 31bf511f Iustin Pop
    "pip", "sip", "tags",
2871 0e67cdbe Iustin Pop
    "master",
2872 19bed813 Iustin Pop
    "role"] + _SIMPLE_FIELDS
2873 31bf511f Iustin Pop
    )
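  # The static/dynamic split above drives the query strategy below: a request
  # naming only static fields (for example ["name", "pinst_cnt"]) is served
  # purely from the configuration, while any dynamic field such as "mfree"
  # requires a live node_info RPC to the selected nodes.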
2874 a8083063 Iustin Pop
2875 35705d8f Guido Trotter
  def ExpandNames(self):
2876 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2877 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2878 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2879 a8083063 Iustin Pop
2880 35705d8f Guido Trotter
    self.needed_locks = {}
2881 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2882 c8d8b4c8 Iustin Pop
2883 c8d8b4c8 Iustin Pop
    if self.op.names:
2884 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
2885 35705d8f Guido Trotter
    else:
2886 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
2887 c8d8b4c8 Iustin Pop
2888 bc8e4a1a Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2889 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
2890 c8d8b4c8 Iustin Pop
    if self.do_locking:
2891 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
2892 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
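    # Note: when only static fields are requested, or use_locking is False,
    # no node locks are taken at all and the results are not guaranteed to be
    # consistent with concurrent cluster operations; otherwise the wanted
    # nodes (or all nodes) are locked in shared mode (share_locks above).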
2893 c8d8b4c8 Iustin Pop
2894 35705d8f Guido Trotter
  def CheckPrereq(self):
2895 35705d8f Guido Trotter
    """Check prerequisites.
2896 35705d8f Guido Trotter

2897 35705d8f Guido Trotter
    """
2898 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in _GetWantedNodes if the
2899 c8d8b4c8 Iustin Pop
    # list is non-empty; if it is empty, there is nothing to validate
2900 c8d8b4c8 Iustin Pop
    pass
2901 a8083063 Iustin Pop
2902 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2903 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2904 a8083063 Iustin Pop

2905 a8083063 Iustin Pop
    """
2906 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
2907 c8d8b4c8 Iustin Pop
    if self.do_locking:
2908 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2909 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2910 3fa93523 Guido Trotter
      nodenames = self.wanted
2911 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
2912 3fa93523 Guido Trotter
      if missing:
2913 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2914 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
2915 c8d8b4c8 Iustin Pop
    else:
2916 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
2917 c1f1cbb2 Iustin Pop
2918 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
2919 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
2920 a8083063 Iustin Pop
2921 a8083063 Iustin Pop
    # begin data gathering
2922 a8083063 Iustin Pop
2923 bc8e4a1a Iustin Pop
    if self.do_node_query:
2924 a8083063 Iustin Pop
      live_data = {}
2925 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2926 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2927 a8083063 Iustin Pop
      for name in nodenames:
2928 781de953 Iustin Pop
        nodeinfo = node_data[name]
2929 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2930 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2931 d599d686 Iustin Pop
          fn = utils.TryConvert
2932 a8083063 Iustin Pop
          live_data[name] = {
2933 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2934 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2935 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2936 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2937 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2938 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2939 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2940 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2941 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2942 a8083063 Iustin Pop
            }
2943 a8083063 Iustin Pop
        else:
2944 a8083063 Iustin Pop
          live_data[name] = {}
2945 a8083063 Iustin Pop
    else:
2946 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
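    # live_data now maps every node name either to a dict of live statistics,
    # e.g. (illustrative values only):
    #   {"mtotal": 4096, "mnode": 512, "mfree": 2048,
    #    "dtotal": 102400, "dfree": 51200, "ctotal": 4,
    #    "cnodes": 1, "csockets": 1, "bootid": "..."}
    # or to an empty dict if live data was not requested or the node did not
    # answer the node_info RPC.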
2947 a8083063 Iustin Pop
2948 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
2949 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2950 a8083063 Iustin Pop
2951 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2952 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2953 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
2954 4dfd6266 Iustin Pop
      inst_data = self.cfg.GetAllInstancesInfo()
2955 a8083063 Iustin Pop
2956 1122eb25 Iustin Pop
      for inst in inst_data.values():
2957 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
2958 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
2959 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
2960 ec223efb Iustin Pop
          if secnode in node_to_secondary:
2961 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
2962 a8083063 Iustin Pop
2963 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
2964 0e67cdbe Iustin Pop
2965 a8083063 Iustin Pop
    # end data gathering
2966 a8083063 Iustin Pop
2967 a8083063 Iustin Pop
    output = []
2968 a8083063 Iustin Pop
    for node in nodelist:
2969 a8083063 Iustin Pop
      node_output = []
2970 a8083063 Iustin Pop
      for field in self.op.output_fields:
2971 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
2972 19bed813 Iustin Pop
          val = getattr(node, field)
2973 ec223efb Iustin Pop
        elif field == "pinst_list":
2974 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
2975 ec223efb Iustin Pop
        elif field == "sinst_list":
2976 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
2977 ec223efb Iustin Pop
        elif field == "pinst_cnt":
2978 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
2979 ec223efb Iustin Pop
        elif field == "sinst_cnt":
2980 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
2981 a8083063 Iustin Pop
        elif field == "pip":
2982 a8083063 Iustin Pop
          val = node.primary_ip
2983 a8083063 Iustin Pop
        elif field == "sip":
2984 a8083063 Iustin Pop
          val = node.secondary_ip
2985 130a6a6f Iustin Pop
        elif field == "tags":
2986 130a6a6f Iustin Pop
          val = list(node.GetTags())
2987 0e67cdbe Iustin Pop
        elif field == "master":
2988 0e67cdbe Iustin Pop
          val = node.name == master_node
2989 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
2990 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
2991 c120ff34 Iustin Pop
        elif field == "role":
2992 c120ff34 Iustin Pop
          if node.name == master_node:
2993 c120ff34 Iustin Pop
            val = "M"
2994 c120ff34 Iustin Pop
          elif node.master_candidate:
2995 c120ff34 Iustin Pop
            val = "C"
2996 c120ff34 Iustin Pop
          elif node.drained:
2997 c120ff34 Iustin Pop
            val = "D"
2998 c120ff34 Iustin Pop
          elif node.offline:
2999 c120ff34 Iustin Pop
            val = "O"
3000 c120ff34 Iustin Pop
          else:
3001 c120ff34 Iustin Pop
            val = "R"
3002 a8083063 Iustin Pop
        else:
3003 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
3004 a8083063 Iustin Pop
        node_output.append(val)
3005 a8083063 Iustin Pop
      output.append(node_output)
3006 a8083063 Iustin Pop
3007 a8083063 Iustin Pop
    return output
3008 a8083063 Iustin Pop
3009 a8083063 Iustin Pop
3010 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
3011 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
3012 dcb93971 Michael Hanselmann

3013 dcb93971 Michael Hanselmann
  """
3014 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
3015 21a15682 Guido Trotter
  REQ_BGL = False
3016 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3017 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
3018 21a15682 Guido Trotter
3019 21a15682 Guido Trotter
  def ExpandNames(self):
3020 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3021 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3022 21a15682 Guido Trotter
                       selected=self.op.output_fields)
3023 21a15682 Guido Trotter
3024 21a15682 Guido Trotter
    self.needed_locks = {}
3025 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
3026 21a15682 Guido Trotter
    if not self.op.nodes:
3027 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3028 21a15682 Guido Trotter
    else:
3029 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
3030 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
3031 dcb93971 Michael Hanselmann
3032 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
3033 dcb93971 Michael Hanselmann
    """Check prerequisites.
3034 dcb93971 Michael Hanselmann

3035 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
3036 dcb93971 Michael Hanselmann

3037 dcb93971 Michael Hanselmann
    """
3038 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3039 dcb93971 Michael Hanselmann
3040 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
3041 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
3042 dcb93971 Michael Hanselmann

3043 dcb93971 Michael Hanselmann
    """
3044 a7ba5e53 Iustin Pop
    nodenames = self.nodes
3045 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
3046 dcb93971 Michael Hanselmann
3047 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
3048 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
3049 dcb93971 Michael Hanselmann
3050 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
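    # lv_by_node maps each instance object to its MapLVsByNode() result,
    # i.e. a dict of node name -> list of LV names belonging to the instance;
    # it is used below to attribute each reported volume to an instance.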
3051 dcb93971 Michael Hanselmann
3052 dcb93971 Michael Hanselmann
    output = []
3053 dcb93971 Michael Hanselmann
    for node in nodenames:
3054 10bfe6cb Iustin Pop
      nresult = volumes[node]
3055 10bfe6cb Iustin Pop
      if nresult.offline:
3056 10bfe6cb Iustin Pop
        continue
3057 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
3058 10bfe6cb Iustin Pop
      if msg:
3059 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3060 37d19eb2 Michael Hanselmann
        continue
3061 37d19eb2 Michael Hanselmann
3062 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
3063 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
3064 dcb93971 Michael Hanselmann
3065 dcb93971 Michael Hanselmann
      for vol in node_vols:
3066 dcb93971 Michael Hanselmann
        node_output = []
3067 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
3068 dcb93971 Michael Hanselmann
          if field == "node":
3069 dcb93971 Michael Hanselmann
            val = node
3070 dcb93971 Michael Hanselmann
          elif field == "phys":
3071 dcb93971 Michael Hanselmann
            val = vol['dev']
3072 dcb93971 Michael Hanselmann
          elif field == "vg":
3073 dcb93971 Michael Hanselmann
            val = vol['vg']
3074 dcb93971 Michael Hanselmann
          elif field == "name":
3075 dcb93971 Michael Hanselmann
            val = vol['name']
3076 dcb93971 Michael Hanselmann
          elif field == "size":
3077 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
3078 dcb93971 Michael Hanselmann
          elif field == "instance":
3079 dcb93971 Michael Hanselmann
            for inst in ilist:
3080 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
3081 dcb93971 Michael Hanselmann
                continue
3082 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
3083 dcb93971 Michael Hanselmann
                val = inst.name
3084 dcb93971 Michael Hanselmann
                break
3085 dcb93971 Michael Hanselmann
            else:
3086 dcb93971 Michael Hanselmann
              val = '-'
3087 dcb93971 Michael Hanselmann
          else:
3088 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
3089 dcb93971 Michael Hanselmann
          node_output.append(str(val))
3090 dcb93971 Michael Hanselmann
3091 dcb93971 Michael Hanselmann
        output.append(node_output)
3092 dcb93971 Michael Hanselmann
3093 dcb93971 Michael Hanselmann
    return output
3094 dcb93971 Michael Hanselmann
3095 dcb93971 Michael Hanselmann
3096 9e5442ce Michael Hanselmann
class LUQueryNodeStorage(NoHooksLU):
3097 9e5442ce Michael Hanselmann
  """Logical unit for getting information on storage units on node(s).
3098 9e5442ce Michael Hanselmann

3099 9e5442ce Michael Hanselmann
  """
3100 9e5442ce Michael Hanselmann
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
3101 9e5442ce Michael Hanselmann
  REQ_BGL = False
3102 620a85fd Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3103 9e5442ce Michael Hanselmann
3104 0e3baaf3 Iustin Pop
  def CheckArguments(self):
3105 0e3baaf3 Iustin Pop
    _CheckStorageType(self.op.storage_type)
3106 9e5442ce Michael Hanselmann
3107 9e5442ce Michael Hanselmann
    _CheckOutputFields(static=self._FIELDS_STATIC,
3108 620a85fd Iustin Pop
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3109 9e5442ce Michael Hanselmann
                       selected=self.op.output_fields)
3110 9e5442ce Michael Hanselmann
3111 0e3baaf3 Iustin Pop
  def ExpandNames(self):
3112 9e5442ce Michael Hanselmann
    self.needed_locks = {}
3113 9e5442ce Michael Hanselmann
    self.share_locks[locking.LEVEL_NODE] = 1
3114 9e5442ce Michael Hanselmann
3115 9e5442ce Michael Hanselmann
    if self.op.nodes:
3116 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = \
3117 9e5442ce Michael Hanselmann
        _GetWantedNodes(self, self.op.nodes)
3118 9e5442ce Michael Hanselmann
    else:
3119 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3120 9e5442ce Michael Hanselmann
3121 9e5442ce Michael Hanselmann
  def CheckPrereq(self):
3122 9e5442ce Michael Hanselmann
    """Check prerequisites.
3123 9e5442ce Michael Hanselmann

3124 9e5442ce Michael Hanselmann
    This checks that the fields required are valid output fields.
3125 9e5442ce Michael Hanselmann

3126 9e5442ce Michael Hanselmann
    """
3127 9e5442ce Michael Hanselmann
    self.op.name = getattr(self.op, "name", None)
3128 9e5442ce Michael Hanselmann
3129 9e5442ce Michael Hanselmann
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3130 9e5442ce Michael Hanselmann
3131 9e5442ce Michael Hanselmann
  def Exec(self, feedback_fn):
3132 9e5442ce Michael Hanselmann
    """Computes the list of nodes and their attributes.
3133 9e5442ce Michael Hanselmann

3134 9e5442ce Michael Hanselmann
    """
3135 9e5442ce Michael Hanselmann
    # Always get name to sort by
3136 9e5442ce Michael Hanselmann
    if constants.SF_NAME in self.op.output_fields:
3137 9e5442ce Michael Hanselmann
      fields = self.op.output_fields[:]
3138 9e5442ce Michael Hanselmann
    else:
3139 9e5442ce Michael Hanselmann
      fields = [constants.SF_NAME] + self.op.output_fields
3140 9e5442ce Michael Hanselmann
3141 620a85fd Iustin Pop
    # Never ask for node or type, as they are only known to the LU
3142 620a85fd Iustin Pop
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
3143 620a85fd Iustin Pop
      while extra in fields:
3144 620a85fd Iustin Pop
        fields.remove(extra)
3145 9e5442ce Michael Hanselmann
3146 9e5442ce Michael Hanselmann
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3147 9e5442ce Michael Hanselmann
    name_idx = field_idx[constants.SF_NAME]
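    # At this point 'fields' is exactly what the storage backend is asked
    # for: it always includes the name (so rows can be keyed and sorted by
    # it) and never the node/type pseudo-fields, which are filled in locally
    # below; field_idx maps each backend field to its column index in the
    # returned rows.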
3148 9e5442ce Michael Hanselmann
3149 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3150 9e5442ce Michael Hanselmann
    data = self.rpc.call_storage_list(self.nodes,
3151 9e5442ce Michael Hanselmann
                                      self.op.storage_type, st_args,
3152 9e5442ce Michael Hanselmann
                                      self.op.name, fields)
3153 9e5442ce Michael Hanselmann
3154 9e5442ce Michael Hanselmann
    result = []
3155 9e5442ce Michael Hanselmann
3156 9e5442ce Michael Hanselmann
    for node in utils.NiceSort(self.nodes):
3157 9e5442ce Michael Hanselmann
      nresult = data[node]
3158 9e5442ce Michael Hanselmann
      if nresult.offline:
3159 9e5442ce Michael Hanselmann
        continue
3160 9e5442ce Michael Hanselmann
3161 9e5442ce Michael Hanselmann
      msg = nresult.fail_msg
3162 9e5442ce Michael Hanselmann
      if msg:
3163 9e5442ce Michael Hanselmann
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3164 9e5442ce Michael Hanselmann
        continue
3165 9e5442ce Michael Hanselmann
3166 9e5442ce Michael Hanselmann
      rows = dict([(row[name_idx], row) for row in nresult.payload])
3167 9e5442ce Michael Hanselmann
3168 9e5442ce Michael Hanselmann
      for name in utils.NiceSort(rows.keys()):
3169 9e5442ce Michael Hanselmann
        row = rows[name]
3170 9e5442ce Michael Hanselmann
3171 9e5442ce Michael Hanselmann
        out = []
3172 9e5442ce Michael Hanselmann
3173 9e5442ce Michael Hanselmann
        for field in self.op.output_fields:
3174 620a85fd Iustin Pop
          if field == constants.SF_NODE:
3175 9e5442ce Michael Hanselmann
            val = node
3176 620a85fd Iustin Pop
          elif field == constants.SF_TYPE:
3177 620a85fd Iustin Pop
            val = self.op.storage_type
3178 9e5442ce Michael Hanselmann
          elif field in field_idx:
3179 9e5442ce Michael Hanselmann
            val = row[field_idx[field]]
3180 9e5442ce Michael Hanselmann
          else:
3181 9e5442ce Michael Hanselmann
            raise errors.ParameterError(field)
3182 9e5442ce Michael Hanselmann
3183 9e5442ce Michael Hanselmann
          out.append(val)
3184 9e5442ce Michael Hanselmann
3185 9e5442ce Michael Hanselmann
        result.append(out)
3186 9e5442ce Michael Hanselmann
3187 9e5442ce Michael Hanselmann
    return result
3188 9e5442ce Michael Hanselmann
3189 9e5442ce Michael Hanselmann
3190 efb8da02 Michael Hanselmann
class LUModifyNodeStorage(NoHooksLU):
3191 efb8da02 Michael Hanselmann
  """Logical unit for modifying a storage volume on a node.
3192 efb8da02 Michael Hanselmann

3193 efb8da02 Michael Hanselmann
  """
3194 efb8da02 Michael Hanselmann
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
3195 efb8da02 Michael Hanselmann
  REQ_BGL = False
3196 efb8da02 Michael Hanselmann
3197 efb8da02 Michael Hanselmann
  def CheckArguments(self):
3198 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3199 efb8da02 Michael Hanselmann
3200 0e3baaf3 Iustin Pop
    _CheckStorageType(self.op.storage_type)
3201 efb8da02 Michael Hanselmann
3202 efb8da02 Michael Hanselmann
  def ExpandNames(self):
3203 efb8da02 Michael Hanselmann
    self.needed_locks = {
3204 efb8da02 Michael Hanselmann
      locking.LEVEL_NODE: self.op.node_name,
3205 efb8da02 Michael Hanselmann
      }
3206 efb8da02 Michael Hanselmann
3207 efb8da02 Michael Hanselmann
  def CheckPrereq(self):
3208 efb8da02 Michael Hanselmann
    """Check prerequisites.
3209 efb8da02 Michael Hanselmann

3210 efb8da02 Michael Hanselmann
    """
3211 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
3212 efb8da02 Michael Hanselmann
3213 efb8da02 Michael Hanselmann
    try:
3214 efb8da02 Michael Hanselmann
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
3215 efb8da02 Michael Hanselmann
    except KeyError:
3216 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
3217 5c983ee5 Iustin Pop
                                 " modified" % storage_type,
3218 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3219 efb8da02 Michael Hanselmann
3220 efb8da02 Michael Hanselmann
    diff = set(self.op.changes.keys()) - modifiable
3221 efb8da02 Michael Hanselmann
    if diff:
3222 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("The following fields can not be modified for"
3223 efb8da02 Michael Hanselmann
                                 " storage units of type '%s': %r" %
3224 5c983ee5 Iustin Pop
                                 (storage_type, list(diff)),
3225 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
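    # Example of the check above, for a hypothetical storage type whose only
    # modifiable field is "allocatable":
    #   changes == {"allocatable": False}  -> accepted
    #   changes == {"size": 10}            -> rejected with the error above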
3226 efb8da02 Michael Hanselmann
3227 efb8da02 Michael Hanselmann
  def Exec(self, feedback_fn):
3228 efb8da02 Michael Hanselmann
    """Computes the list of nodes and their attributes.
3229 efb8da02 Michael Hanselmann

3230 efb8da02 Michael Hanselmann
    """
3231 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3232 efb8da02 Michael Hanselmann
    result = self.rpc.call_storage_modify(self.op.node_name,
3233 efb8da02 Michael Hanselmann
                                          self.op.storage_type, st_args,
3234 efb8da02 Michael Hanselmann
                                          self.op.name, self.op.changes)
3235 efb8da02 Michael Hanselmann
    result.Raise("Failed to modify storage unit '%s' on %s" %
3236 efb8da02 Michael Hanselmann
                 (self.op.name, self.op.node_name))
3237 efb8da02 Michael Hanselmann
3238 efb8da02 Michael Hanselmann
3239 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
3240 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
3241 a8083063 Iustin Pop

3242 a8083063 Iustin Pop
  """
3243 a8083063 Iustin Pop
  HPATH = "node-add"
3244 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
3245 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
3246 a8083063 Iustin Pop
3247 44caf5a8 Iustin Pop
  def CheckArguments(self):
3248 44caf5a8 Iustin Pop
    # validate/normalize the node name
3249 44caf5a8 Iustin Pop
    self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)
3250 44caf5a8 Iustin Pop
3251 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3252 a8083063 Iustin Pop
    """Build hooks env.
3253 a8083063 Iustin Pop

3254 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
3255 a8083063 Iustin Pop

3256 a8083063 Iustin Pop
    """
3257 a8083063 Iustin Pop
    env = {
3258 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
3259 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
3260 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
3261 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
3262 a8083063 Iustin Pop
      }
3263 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
3264 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
3265 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
3266 a8083063 Iustin Pop
3267 a8083063 Iustin Pop
  def CheckPrereq(self):
3268 a8083063 Iustin Pop
    """Check prerequisites.
3269 a8083063 Iustin Pop

3270 a8083063 Iustin Pop
    This checks:
3271 a8083063 Iustin Pop
     - the new node is not already in the config
3272 a8083063 Iustin Pop
     - it is resolvable
3273 a8083063 Iustin Pop
     - its parameters (single/dual homed) match the cluster
3274 a8083063 Iustin Pop

3275 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
3276 a8083063 Iustin Pop

3277 a8083063 Iustin Pop
    """
3278 a8083063 Iustin Pop
    node_name = self.op.node_name
3279 a8083063 Iustin Pop
    cfg = self.cfg
3280 a8083063 Iustin Pop
3281 104f4ca1 Iustin Pop
    dns_data = utils.GetHostInfo(node_name)
3282 a8083063 Iustin Pop
3283 bcf043c9 Iustin Pop
    node = dns_data.name
3284 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
3285 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
3286 a8083063 Iustin Pop
    if secondary_ip is None:
3287 a8083063 Iustin Pop
      secondary_ip = primary_ip
3288 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
3289 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given",
3290 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3291 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
3292 e7c6e02b Michael Hanselmann
3293 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
3294 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
3295 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
3296 5c983ee5 Iustin Pop
                                 node, errors.ECODE_EXISTS)
3297 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
3298 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
3299 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
3300 a8083063 Iustin Pop
3301 a8083063 Iustin Pop
    for existing_node_name in node_list:
3302 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
3303 e7c6e02b Michael Hanselmann
3304 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
3305 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
3306 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
3307 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
3308 5c983ee5 Iustin Pop
                                     " address configuration as before",
3309 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
3310 e7c6e02b Michael Hanselmann
        continue
3311 e7c6e02b Michael Hanselmann
3312 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
3313 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
3314 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
3315 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
3316 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
3317 5c983ee5 Iustin Pop
                                   " existing node %s" % existing_node.name,
3318 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
3319 a8083063 Iustin Pop
3320 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
3321 a8083063 Iustin Pop
    # same as for the master
3322 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
3323 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
3324 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
3325 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
3326 a8083063 Iustin Pop
      if master_singlehomed:
3327 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
3328 5c983ee5 Iustin Pop
                                   " new node has one",
3329 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3330 a8083063 Iustin Pop
      else:
3331 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
3332 5c983ee5 Iustin Pop
                                   " new node doesn't have one",
3333 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
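    # In other words, the new node must match the master's setup: either both
    # have a separate secondary IP or neither does.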
3334 a8083063 Iustin Pop
3335 5bbd3f7f Michael Hanselmann
    # checks reachability
3336 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
3337 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping",
3338 5c983ee5 Iustin Pop
                                 errors.ECODE_ENVIRON)
3339 a8083063 Iustin Pop
3340 a8083063 Iustin Pop
    if not newbie_singlehomed:
3341 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
3342 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
3343 b15d625f Iustin Pop
                           source=myself.secondary_ip):
3344 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
3345 5c983ee5 Iustin Pop
                                   " based ping to noded port",
3346 5c983ee5 Iustin Pop
                                   errors.ECODE_ENVIRON)
3347 a8083063 Iustin Pop
3348 a8ae3eb5 Iustin Pop
    if self.op.readd:
3349 a8ae3eb5 Iustin Pop
      exceptions = [node]
3350 a8ae3eb5 Iustin Pop
    else:
3351 a8ae3eb5 Iustin Pop
      exceptions = []
3352 6d7e1f20 Guido Trotter
3353 6d7e1f20 Guido Trotter
    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
3354 0fff97e9 Guido Trotter
3355 a8ae3eb5 Iustin Pop
    if self.op.readd:
3356 a8ae3eb5 Iustin Pop
      self.new_node = self.cfg.GetNodeInfo(node)
3357 a8ae3eb5 Iustin Pop
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
3358 a8ae3eb5 Iustin Pop
    else:
3359 a8ae3eb5 Iustin Pop
      self.new_node = objects.Node(name=node,
3360 a8ae3eb5 Iustin Pop
                                   primary_ip=primary_ip,
3361 a8ae3eb5 Iustin Pop
                                   secondary_ip=secondary_ip,
3362 a8ae3eb5 Iustin Pop
                                   master_candidate=self.master_candidate,
3363 a8ae3eb5 Iustin Pop
                                   offline=False, drained=False)
3364 a8083063 Iustin Pop
3365 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3366 a8083063 Iustin Pop
    """Adds the new node to the cluster.
3367 a8083063 Iustin Pop

3368 a8083063 Iustin Pop
    """
3369 a8083063 Iustin Pop
    new_node = self.new_node
3370 a8083063 Iustin Pop
    node = new_node.name
3371 a8083063 Iustin Pop
3372 a8ae3eb5 Iustin Pop
    # for re-adds, reset the offline/drained/master-candidate flags;
3373 a8ae3eb5 Iustin Pop
    # we need to reset here, otherwise offline would prevent RPC calls
3374 a8ae3eb5 Iustin Pop
    # later in the procedure; this also means that if the re-add
3375 a8ae3eb5 Iustin Pop
    # fails, we are left with a non-offlined, broken node
3376 a8ae3eb5 Iustin Pop
    if self.op.readd:
3377 7260cfbe Iustin Pop
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
3378 a8ae3eb5 Iustin Pop
      self.LogInfo("Readding a node, the offline/drained flags were reset")
3379 a8ae3eb5 Iustin Pop
      # if we demote the node, we do cleanup later in the procedure
3380 a8ae3eb5 Iustin Pop
      new_node.master_candidate = self.master_candidate
3381 a8ae3eb5 Iustin Pop
3382 a8ae3eb5 Iustin Pop
    # notify the user about any possible mc promotion
3383 a8ae3eb5 Iustin Pop
    if new_node.master_candidate:
3384 a8ae3eb5 Iustin Pop
      self.LogInfo("Node will be a master candidate")
3385 a8ae3eb5 Iustin Pop
3386 a8083063 Iustin Pop
    # check connectivity
3387 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
3388 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
3389 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
3390 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
3391 90b54c26 Iustin Pop
                   node, result.payload)
3392 a8083063 Iustin Pop
    else:
3393 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
3394 90b54c26 Iustin Pop
                               " node version %s" %
3395 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
3396 a8083063 Iustin Pop
3397 a8083063 Iustin Pop
    # setup ssh on node
3398 b989b9d9 Ken Wehr
    if self.cfg.GetClusterInfo().modify_ssh_setup:
3399 b989b9d9 Ken Wehr
      logging.info("Copy ssh key to node %s", node)
3400 b989b9d9 Ken Wehr
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
3401 b989b9d9 Ken Wehr
      keyarray = []
3402 b989b9d9 Ken Wehr
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
3403 b989b9d9 Ken Wehr
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
3404 b989b9d9 Ken Wehr
                  priv_key, pub_key]
3405 b989b9d9 Ken Wehr
3406 b989b9d9 Ken Wehr
      for i in keyfiles:
3407 b989b9d9 Ken Wehr
        keyarray.append(utils.ReadFile(i))
3408 b989b9d9 Ken Wehr
3409 b989b9d9 Ken Wehr
      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
3410 b989b9d9 Ken Wehr
                                      keyarray[2], keyarray[3], keyarray[4],
3411 b989b9d9 Ken Wehr
                                      keyarray[5])
3412 b989b9d9 Ken Wehr
      result.Raise("Cannot transfer ssh keys to the new node")
3413 a8083063 Iustin Pop
3414 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
3415 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3416 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
3417 c8a0948f Michael Hanselmann
3418 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
3419 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
3420 781de953 Iustin Pop
                                                 new_node.secondary_ip)
3421 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
3422 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_ENVIRON)
3423 c2fc8250 Iustin Pop
      if not result.payload:
3424 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
3425 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
3426 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
3427 a8083063 Iustin Pop
3428 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
3429 5c0527ed Guido Trotter
    node_verify_param = {
3430 f60759f7 Iustin Pop
      constants.NV_NODELIST: [node],
3431 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
3432 5c0527ed Guido Trotter
    }
3433 5c0527ed Guido Trotter
3434 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
3435 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
3436 5c0527ed Guido Trotter
    for verifier in node_verify_list:
3437 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
3438 f60759f7 Iustin Pop
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
3439 6f68a739 Iustin Pop
      if nl_payload:
3440 6f68a739 Iustin Pop
        for failed in nl_payload:
3441 31821208 Iustin Pop
          feedback_fn("ssh/hostname verification failed"
3442 31821208 Iustin Pop
                      " (checking from %s): %s" %
3443 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
3444 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
3445 ff98055b Iustin Pop
3446 d8470559 Michael Hanselmann
    if self.op.readd:
3447 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
3448 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
3449 a8ae3eb5 Iustin Pop
      # make sure we redistribute the config
3450 a4eae71f Michael Hanselmann
      self.cfg.Update(new_node, feedback_fn)
3451 a8ae3eb5 Iustin Pop
      # and make sure the new node will not have old files around
3452 a8ae3eb5 Iustin Pop
      if not new_node.master_candidate:
3453 a8ae3eb5 Iustin Pop
        result = self.rpc.call_node_demote_from_mc(new_node.name)
3454 3cebe102 Michael Hanselmann
        msg = result.fail_msg
3455 a8ae3eb5 Iustin Pop
        if msg:
3456 a8ae3eb5 Iustin Pop
          self.LogWarning("Node failed to demote itself from master"
3457 a8ae3eb5 Iustin Pop
                          " candidate status: %s" % msg)
3458 d8470559 Michael Hanselmann
    else:
3459 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
3460 0debfb35 Guido Trotter
      self.context.AddNode(new_node, self.proc.GetECId())
3461 a8083063 Iustin Pop
3462 a8083063 Iustin Pop
3463 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
3464 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
3465 b31c8676 Iustin Pop

3466 b31c8676 Iustin Pop
  """
3467 b31c8676 Iustin Pop
  HPATH = "node-modify"
3468 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
3469 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
3470 b31c8676 Iustin Pop
  REQ_BGL = False
3471 b31c8676 Iustin Pop
3472 b31c8676 Iustin Pop
  def CheckArguments(self):
3473 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3474 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
3475 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
3476 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
3477 601908d0 Iustin Pop
    _CheckBooleanOpField(self.op, 'auto_promote')
3478 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
3479 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
3480 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification",
3481 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3482 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
3483 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
3484 5c983ee5 Iustin Pop
                                 " state at the same time",
3485 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3486 b31c8676 Iustin Pop
3487 601908d0 Iustin Pop
    # Boolean value that tells us whether we're offlining or draining the node
3488 601908d0 Iustin Pop
    self.offline_or_drain = (self.op.offline == True or
3489 601908d0 Iustin Pop
                             self.op.drained == True)
3490 601908d0 Iustin Pop
    self.deoffline_or_drain = (self.op.offline == False or
3491 601908d0 Iustin Pop
                               self.op.drained == False)
3492 601908d0 Iustin Pop
    self.might_demote = (self.op.master_candidate == False or
3493 601908d0 Iustin Pop
                         self.offline_or_drain)
3494 601908d0 Iustin Pop
3495 601908d0 Iustin Pop
    self.lock_all = self.op.auto_promote and self.might_demote
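    # Summary of the helper flags: clearing master_candidate, or setting
    # offline/drained, may demote this node (might_demote); if the caller
    # also passed auto_promote, all node locks are taken (lock_all) so that
    # a replacement master candidate can be promoted in Exec.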
3496 601908d0 Iustin Pop
3497 601908d0 Iustin Pop
3498 b31c8676 Iustin Pop
  def ExpandNames(self):
3499 601908d0 Iustin Pop
    if self.lock_all:
3500 601908d0 Iustin Pop
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
3501 601908d0 Iustin Pop
    else:
3502 601908d0 Iustin Pop
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
3503 b31c8676 Iustin Pop
3504 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
3505 b31c8676 Iustin Pop
    """Build hooks env.
3506 b31c8676 Iustin Pop

3507 b31c8676 Iustin Pop
    This runs on the master node.
3508 b31c8676 Iustin Pop

3509 b31c8676 Iustin Pop
    """
3510 b31c8676 Iustin Pop
    env = {
3511 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
3512 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
3513 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
3514 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
3515 b31c8676 Iustin Pop
      }
3516 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
3517 b31c8676 Iustin Pop
          self.op.node_name]
3518 b31c8676 Iustin Pop
    return env, nl, nl
3519 b31c8676 Iustin Pop
3520 b31c8676 Iustin Pop
  def CheckPrereq(self):
3521 b31c8676 Iustin Pop
    """Check prerequisites.
3522 b31c8676 Iustin Pop

3523 b31c8676 Iustin Pop
    This checks the requested changes against the node's current state.
3524 b31c8676 Iustin Pop

3525 b31c8676 Iustin Pop
    """
3526 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
3527 b31c8676 Iustin Pop
3528 97c61d46 Iustin Pop
    if (self.op.master_candidate is not None or
3529 97c61d46 Iustin Pop
        self.op.drained is not None or
3530 97c61d46 Iustin Pop
        self.op.offline is not None):
3531 97c61d46 Iustin Pop
      # we can't change the master's node flags
3532 97c61d46 Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
3533 97c61d46 Iustin Pop
        raise errors.OpPrereqError("The master role can be changed"
3534 5c983ee5 Iustin Pop
                                   " only via masterfailover",
3535 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3536 97c61d46 Iustin Pop
3537 601908d0 Iustin Pop
3538 601908d0 Iustin Pop
    if node.master_candidate and self.might_demote and not self.lock_all:
3539 601908d0 Iustin Pop
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
3540 601908d0 Iustin Pop
      # check if after removing the current node, we're missing master
3541 601908d0 Iustin Pop
      # candidates
3542 601908d0 Iustin Pop
      (mc_remaining, mc_should, _) = \
3543 601908d0 Iustin Pop
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
3544 8fe9239e Iustin Pop
      if mc_remaining < mc_should:
3545 601908d0 Iustin Pop
        raise errors.OpPrereqError("Not enough master candidates, please"
3546 601908d0 Iustin Pop
                                   " pass auto_promote to allow promotion",
3547 601908d0 Iustin Pop
                                   errors.ECODE_INVAL)
3548 3e83dd48 Iustin Pop
3549 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
3550 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
3551 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
3552 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
3553 5c983ee5 Iustin Pop
                                 " to master_candidate" % node.name,
3554 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3555 3a5ba66a Iustin Pop
3556 3d9eb52b Guido Trotter
    # If the node is being de-offlined or de-drained, promote it to master
    # candidate if needed
3557 601908d0 Iustin Pop
    if (self.deoffline_or_drain and not self.offline_or_drain and not
3558 cea0534a Guido Trotter
        self.op.master_candidate == True and not node.master_candidate):
3559 3d9eb52b Guido Trotter
      self.op.master_candidate = _DecideSelfPromotion(self)
3560 3d9eb52b Guido Trotter
      if self.op.master_candidate:
3561 3d9eb52b Guido Trotter
        self.LogInfo("Autopromoting node to master candidate")
3562 3d9eb52b Guido Trotter
3563 b31c8676 Iustin Pop
    return
3564 b31c8676 Iustin Pop
3565 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
3566 b31c8676 Iustin Pop
    """Modifies a node.
3567 b31c8676 Iustin Pop

3568 b31c8676 Iustin Pop
    """
3569 3a5ba66a Iustin Pop
    node = self.node
3570 b31c8676 Iustin Pop
3571 b31c8676 Iustin Pop
    result = []
3572 c9d443ea Iustin Pop
    changed_mc = False
3573 b31c8676 Iustin Pop
3574 3a5ba66a Iustin Pop
    if self.op.offline is not None:
3575 3a5ba66a Iustin Pop
      node.offline = self.op.offline
3576 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
3577 c9d443ea Iustin Pop
      if self.op.offline == True:
3578 c9d443ea Iustin Pop
        if node.master_candidate:
3579 c9d443ea Iustin Pop
          node.master_candidate = False
3580 c9d443ea Iustin Pop
          changed_mc = True
3581 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
3582 c9d443ea Iustin Pop
        if node.drained:
3583 c9d443ea Iustin Pop
          node.drained = False
3584 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
3585 3a5ba66a Iustin Pop
3586 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
3587 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
3588 c9d443ea Iustin Pop
      changed_mc = True
3589 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
3590 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
3591 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
3592 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
3593 0959c824 Iustin Pop
        if msg:
3594 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
3595 b31c8676 Iustin Pop
3596 c9d443ea Iustin Pop
    if self.op.drained is not None:
3597 c9d443ea Iustin Pop
      node.drained = self.op.drained
3598 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
3599 c9d443ea Iustin Pop
      if self.op.drained == True:
3600 c9d443ea Iustin Pop
        if node.master_candidate:
3601 c9d443ea Iustin Pop
          node.master_candidate = False
3602 c9d443ea Iustin Pop
          changed_mc = True
3603 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
3604 dec0d9da Iustin Pop
          rrc = self.rpc.call_node_demote_from_mc(node.name)
3605 3cebe102 Michael Hanselmann
          msg = rrc.fail_msg
3606 dec0d9da Iustin Pop
          if msg:
3607 dec0d9da Iustin Pop
            self.LogWarning("Node failed to demote itself: %s" % msg)
3608 c9d443ea Iustin Pop
        if node.offline:
3609 c9d443ea Iustin Pop
          node.offline = False
3610 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
3611 c9d443ea Iustin Pop
3612 601908d0 Iustin Pop
    # we locked all nodes, so we adjust the candidate pool before updating
    # this node
3613 601908d0 Iustin Pop
    if self.lock_all:
3614 601908d0 Iustin Pop
      _AdjustCandidatePool(self, [node.name])
3615 601908d0 Iustin Pop
3616 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
3617 a4eae71f Michael Hanselmann
    self.cfg.Update(node, feedback_fn)
3618 601908d0 Iustin Pop
3619 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
3620 c9d443ea Iustin Pop
    if changed_mc:
3621 3a26773f Iustin Pop
      self.context.ReaddNode(node)
3622 b31c8676 Iustin Pop
3623 b31c8676 Iustin Pop
    return result
3624 b31c8676 Iustin Pop
3625 b31c8676 Iustin Pop
3626 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
3627 f5118ade Iustin Pop
  """Powercycles a node.
3628 f5118ade Iustin Pop

3629 f5118ade Iustin Pop
  """
3630 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
3631 f5118ade Iustin Pop
  REQ_BGL = False
3632 f5118ade Iustin Pop
3633 f5118ade Iustin Pop
  def CheckArguments(self):
3634 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3635 cf26a87a Iustin Pop
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
3636 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
3637 5c983ee5 Iustin Pop
                                 " parameter was not set",
3638 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3639 f5118ade Iustin Pop
3640 f5118ade Iustin Pop
  def ExpandNames(self):
3641 f5118ade Iustin Pop
    """Locking for PowercycleNode.
3642 f5118ade Iustin Pop

3643 efb8da02 Michael Hanselmann
    This is a last-resort option and shouldn't block on other
3644 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
3645 f5118ade Iustin Pop

3646 f5118ade Iustin Pop
    """
3647 f5118ade Iustin Pop
    self.needed_locks = {}
3648 f5118ade Iustin Pop
3649 f5118ade Iustin Pop
  def CheckPrereq(self):
3650 f5118ade Iustin Pop
    """Check prerequisites.
3651 f5118ade Iustin Pop

3652 f5118ade Iustin Pop
    This LU has no prereqs.
3653 f5118ade Iustin Pop

3654 f5118ade Iustin Pop
    """
3655 f5118ade Iustin Pop
    pass
3656 f5118ade Iustin Pop
3657 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
3658 f5118ade Iustin Pop
    """Reboots a node.
3659 f5118ade Iustin Pop

3660 f5118ade Iustin Pop
    """
3661 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
3662 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
3663 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
3664 f5118ade Iustin Pop
    return result.payload
3665 f5118ade Iustin Pop
3666 f5118ade Iustin Pop
3667 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
3668 a8083063 Iustin Pop
  """Query cluster configuration.
3669 a8083063 Iustin Pop

3670 a8083063 Iustin Pop
  """
3671 a8083063 Iustin Pop
  _OP_REQP = []
3672 642339cf Guido Trotter
  REQ_BGL = False
3673 642339cf Guido Trotter
3674 642339cf Guido Trotter
  def ExpandNames(self):
3675 642339cf Guido Trotter
    self.needed_locks = {}
3676 a8083063 Iustin Pop
3677 a8083063 Iustin Pop
  def CheckPrereq(self):
3678 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
3679 a8083063 Iustin Pop

3680 a8083063 Iustin Pop
    """
3681 a8083063 Iustin Pop
    pass
3682 a8083063 Iustin Pop
3683 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3684 a8083063 Iustin Pop
    """Return cluster config.
3685 a8083063 Iustin Pop

3686 a8083063 Iustin Pop
    """
3687 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
3688 17463d22 René Nussbaumer
    os_hvp = {}
3689 17463d22 René Nussbaumer
3690 17463d22 Renรฉ Nussbaumer
    # Filter just for enabled hypervisors
3691 17463d22 Renรฉ Nussbaumer
    for os_name, hv_dict in cluster.os_hvp.items():
3692 17463d22 Renรฉ Nussbaumer
      os_hvp[os_name] = {}
3693 17463d22 Renรฉ Nussbaumer
      for hv_name, hv_params in hv_dict.items():
3694 17463d22 Renรฉ Nussbaumer
        if hv_name in cluster.enabled_hypervisors:
3695 17463d22 Renรฉ Nussbaumer
          os_hvp[os_name][hv_name] = hv_params
3696 17463d22 Renรฉ Nussbaumer
3697 a8083063 Iustin Pop
    result = {
3698 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
3699 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
3700 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
3701 d1a7d66f Guido Trotter
      "os_api_version": max(constants.OS_API_VERSIONS),
3702 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
3703 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
3704 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
3705 469f88e1 Iustin Pop
      "master": cluster.master_node,
3706 066f465d Guido Trotter
      "default_hypervisor": cluster.enabled_hypervisors[0],
3707 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
3708 b8810fec Michael Hanselmann
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
3709 7c4d6c7b Michael Hanselmann
                        for hypervisor_name in cluster.enabled_hypervisors]),
3710 17463d22 René Nussbaumer
      "os_hvp": os_hvp,
3711 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
3712 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
3713 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
3714 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
3715 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
3716 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
3717 3953242f Iustin Pop
      "maintain_node_health": cluster.maintain_node_health,
3718 90f72445 Iustin Pop
      "ctime": cluster.ctime,
3719 90f72445 Iustin Pop
      "mtime": cluster.mtime,
3720 259578eb Iustin Pop
      "uuid": cluster.uuid,
3721 c118d1f4 Michael Hanselmann
      "tags": list(cluster.GetTags()),
3722 a8083063 Iustin Pop
      }
3723 a8083063 Iustin Pop
3724 a8083063 Iustin Pop
    return result
3725 a8083063 Iustin Pop
3726 a8083063 Iustin Pop
3727 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
3728 ae5849b5 Michael Hanselmann
  """Return configuration values.
3729 a8083063 Iustin Pop

3730 a8083063 Iustin Pop
  """
3731 a8083063 Iustin Pop
  _OP_REQP = []
3732 642339cf Guido Trotter
  REQ_BGL = False
3733 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
3734 05e50653 Michael Hanselmann
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
3735 05e50653 Michael Hanselmann
                                  "watcher_pause")
3736 642339cf Guido Trotter
3737 642339cf Guido Trotter
  def ExpandNames(self):
3738 642339cf Guido Trotter
    self.needed_locks = {}
3739 a8083063 Iustin Pop
3740 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3741 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3742 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
3743 ae5849b5 Michael Hanselmann
3744 a8083063 Iustin Pop
  def CheckPrereq(self):
3745 a8083063 Iustin Pop
    """No prerequisites.
3746 a8083063 Iustin Pop

3747 a8083063 Iustin Pop
    """
3748 a8083063 Iustin Pop
    pass
3749 a8083063 Iustin Pop
3750 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3751 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
3752 a8083063 Iustin Pop

3753 a8083063 Iustin Pop
    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)
    if not hasattr(self.op, "ignore_size"):
      self.op.ignore_size = False

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
              _AssembleInstanceDisks(self, self.instance,
                                     ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: a tuple of (disks_ok, device_info); device_info is a list of
      (host, instance_visible_name, node_visible_name) tuples with the
      mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                       ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)


def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
  _ShutdownInstanceDisks(lu, instance)


def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is false, errors on the primary node are not
  ignored.

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result


def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise("Can't get data from node %s" % node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
  free_mem = nodeinfo[node].payload.get('memory_free', None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)


def _CheckNodesFreeDisk(lu, nodenames, requested):
  """Checks if nodes have enough free disk space in the default VG.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
                                   lu.cfg.GetHypervisorType())
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    vg_free = info.payload.get("vg_free", None)
    if not isinstance(vg_free, int):
      raise errors.OpPrereqError("Can't compute free disk space on node %s,"
                                 " result was '%s'" % (node, vg_free),
                                 errors.ECODE_ENVIRON)
    if requested > vg_free:
      raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                 " required %d MiB, available %d MiB" %
                                 (node, requested, vg_free),
                                 errors.ECODE_NORES)


class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra beparams
    self.beparams = getattr(self.op, "beparams", {})
    if self.beparams:
      if not isinstance(self.beparams, dict):
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                                   " dict" % (type(self.beparams), ),
                                   errors.ECODE_INVAL)
      # fill the beparams dict
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
      self.op.beparams = self.beparams

    # extra hvparams
    self.hvparams = getattr(self.op, "hvparams", {})
    if self.hvparams:
      if not isinstance(self.hvparams, dict):
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
                                   " dict" % (type(self.hvparams), ),
                                   errors.ECODE_INVAL)

      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
                                    instance.hvparams)
      filled_hvp.update(self.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
      self.op.hvparams = self.hvparams

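    # Note: the effective hypervisor parameters validated above are built by
    # layering cluster defaults, then the instance's own hvparams, then the
    # per-startup overrides supplied with this opcode.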
    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check that the instance's bridges exist
    _CheckInstanceBridgesExist(self, instance)

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True, ecode=errors.ECODE_ENVIRON)
    if not remote_info.payload: # not running already
      _CheckNodeFreeMemory(self, instance.primary_node,
                           "starting instance %s" % instance.name,
                           bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance,
                                          self.hvparams, self.beparams)
    msg = result.fail_msg
    if msg:
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)
4152 a8083063 Iustin Pop
4153 a8083063 Iustin Pop
4154 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
4155 bf6929a2 Alexander Schreiber
  """Reboot an instance.
4156 bf6929a2 Alexander Schreiber

4157 bf6929a2 Alexander Schreiber
  """
4158 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
4159 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
4160 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
4161 e873317a Guido Trotter
  REQ_BGL = False
4162 e873317a Guido Trotter
4163 17c3f802 Guido Trotter
  def CheckArguments(self):
4164 17c3f802 Guido Trotter
    """Check the arguments.
4165 17c3f802 Guido Trotter

4166 17c3f802 Guido Trotter
    """
4167 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4168 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4169 17c3f802 Guido Trotter
4170 e873317a Guido Trotter
  def ExpandNames(self):
4171 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
4172 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
4173 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
4174 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
4175 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
4176 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
4177 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
4178 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4179 bf6929a2 Alexander Schreiber
4180 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
4181 bf6929a2 Alexander Schreiber
    """Build hooks env.
4182 bf6929a2 Alexander Schreiber

4183 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
4184 bf6929a2 Alexander Schreiber

4185 bf6929a2 Alexander Schreiber
    """
4186 bf6929a2 Alexander Schreiber
    env = {
4187 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
4188 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
4189 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4190 bf6929a2 Alexander Schreiber
      }
4191 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4192 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4193 bf6929a2 Alexander Schreiber
    return env, nl, nl
4194 bf6929a2 Alexander Schreiber
4195 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
4196 bf6929a2 Alexander Schreiber
    """Check prerequisites.
4197 bf6929a2 Alexander Schreiber

4198 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
4199 bf6929a2 Alexander Schreiber

4200 bf6929a2 Alexander Schreiber
    """
4201 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4202 e873317a Guido Trotter
    assert self.instance is not None, \
4203 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4204 bf6929a2 Alexander Schreiber
4205 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4206 7527a8a4 Iustin Pop
4207 5bbd3f7f Michael Hanselmann
    # check bridges existence
4208 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
4209 bf6929a2 Alexander Schreiber
4210 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
4211 bf6929a2 Alexander Schreiber
    """Reboot the instance.
4212 bf6929a2 Alexander Schreiber

4213 bf6929a2 Alexander Schreiber
    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type,
                                             self.shutdown_timeout)
      result.Raise("Could not reboot instance")
    else:
      result = self.rpc.call_instance_shutdown(node_current, instance,
                                               self.shutdown_timeout)
      result.Raise("Could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)


class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.timeout = getattr(self.op, "timeout",
                           constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["TIMEOUT"] = self.timeout
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    timeout = self.timeout
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
    msg = result.fail_msg
    if msg:
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)

    _ShutdownInstanceDisks(self, instance)


class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name,
                                 errors.ECODE_INVAL)
    _CheckInstanceDown(self, instance, "cannot reinstall")

    self.op.os_type = getattr(self.op, "os_type", None)
    self.op.force_variant = getattr(self.op, "force_variant", False)
    if self.op.os_type is not None:
      # OS verification
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst, feedback_fn)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # FIXME: pass debug option from opcode to backend
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
                                             self.op.debug_level)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURecreateInstanceDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disks"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    if not isinstance(self.op.disks, list):
      raise errors.OpPrereqError("Invalid disks parameter", errors.ECODE_INVAL)
    for item in self.op.disks:
      if (not isinstance(item, int) or
          item < 0):
        raise errors.OpPrereqError("Invalid disk specification '%s'" %
                                   str(item), errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name, errors.ECODE_INVAL)
    _CheckInstanceDown(self, instance, "cannot recreate disks")

    if not self.op.disks:
      self.op.disks = range(len(instance.disks))
    else:
      for idx in self.op.disks:
        if idx >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
                                     errors.ECODE_INVAL)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    to_skip = []
    for idx, _ in enumerate(self.instance.disks):
      if idx not in self.op.disks: # disk idx has not been passed in
        to_skip.append(idx)
        continue

    _CreateDisks(self, self.instance, to_skip=to_skip)


class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    _CheckNodeOnline(self, instance.primary_node)
    _CheckInstanceDown(self, instance, "cannot rename")
    self.instance = instance

    # new name verification
    name_info = utils.GetHostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)


class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return env, nl, nl_post

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name


class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable-msg=W0142
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
                    "serial_no", "ctime", "mtime", "uuid"]
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "nic_mode", "nic_link",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
                                    r"(nic)\.(bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "hvparams",
                                    ] + _SIMPLE_FIELDS +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS
                                   if name not in constants.HVC_GLOBALS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
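  # The regexp entries in _FIELDS_STATIC describe parameterized fields such
  # as "disk.size/0" or "nic.mac/1"; the plain string entries are matched
  # literally.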
4645 31bf511f Iustin Pop
4646 a8083063 Iustin Pop
4647 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
4648 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
4649 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
4650 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
4651 a8083063 Iustin Pop
4652 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
4653 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
4654 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
4655 7eb9d8f7 Guido Trotter
4656 57a2fb91 Iustin Pop
    if self.op.names:
4657 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
4658 7eb9d8f7 Guido Trotter
    else:
4659 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
4660 7eb9d8f7 Guido Trotter
4661 ec79568d Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
4662 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
4663 57a2fb91 Iustin Pop
    if self.do_locking:
4664 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4665 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
4666 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4667 7eb9d8f7 Guido Trotter
4668 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
4669 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
4670 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
4671 7eb9d8f7 Guido Trotter
4672 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
4673 7eb9d8f7 Guido Trotter
    """Check prerequisites.
4674 7eb9d8f7 Guido Trotter

4675 7eb9d8f7 Guido Trotter
    """
4676 57a2fb91 Iustin Pop
    pass
4677 069dcc86 Iustin Pop
4678 a8083063 Iustin Pop

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    # pylint: disable-msg=R0912
    # way too many branches here
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        else:
          if result.payload:
            live_data.update(result.payload)
          # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    cluster = self.cfg.GetClusterInfo()
    for instance in instance_list:
      iout = []
      i_hv = cluster.FillHV(instance, skip_globals=True)
      i_be = cluster.FillBE(instance)
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
                                 nic.nicparams) for nic in instance.nics]
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field in self._SIMPLE_FIELDS:
          val = getattr(instance, field)
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "vcpus":
          val = i_be[constants.BE_VCPUS]
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          if instance.nics:
            val = instance.nics[0].ip
          else:
            val = None
        elif field == "nic_mode":
          if instance.nics:
            val = i_nicp[0][constants.NIC_MODE]
          else:
            val = None
        elif field == "nic_link":
          if instance.nics:
            val = i_nicp[0][constants.NIC_LINK]
          else:
            val = None
        elif field == "bridge":
          if (instance.nics and
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
            val = i_nicp[0][constants.NIC_LINK]
          else:
            val = None
        elif field == "mac":
          if instance.nics:
            val = instance.nics[0].mac
          else:
            val = None
        elif field == "sda_size" or field == "sdb_size":
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
              field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "modes":
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
            elif st_groups[1] == "links":
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
            elif st_groups[1] == "bridges":
              val = []
              for nicp in i_nicp:
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
                  val.append(nicp[constants.NIC_LINK])
                else:
                  val.append(None)
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "mode":
                  val = i_nicp[nic_idx][constants.NIC_MODE]
                elif st_groups[1] == "link":
                  val = i_nicp[nic_idx][constants.NIC_LINK]
                elif st_groups[1] == "bridge":
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
                  if nic_mode == constants.NIC_MODE_BRIDGED:
                    val = i_nicp[nic_idx][constants.NIC_LINK]
                  else:
                    val = None
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, ("Declared but unhandled variable parameter '%s'" %
                           field)
        else:
          assert False, "Declared but unhandled parameter '%s'" % field
        iout.append(val)
      output.append(iout)

    return output
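
# Editor's illustrative sketch (not called by any LU): how the indexed
# disk/NIC field names accepted by LUQueryInstances decompose via the
# _FIELDS_STATIC regexes above. The function name and the local pattern list
# are hypothetical; only the regex syntax mirrors the definitions in the
# class above.
def _ExampleQueryFieldMatch(field):
  """Return the regex groups for an indexed field name, or None (sketch).

  For example, "nic.mac/0" yields ("nic", "mac", "0") and "disk.count"
  yields ("disk", "count"); these are the tuples Exec above dispatches on
  via st_match.groups().

  """
  import re  # local import keeps the sketch self-contained
  patterns = [r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
              r"(disk|nic)\.(count)"]
  for pattern in patterns:
    match = re.match(pattern + "$", field)
    if match:
      return match.groups()
  return None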


class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self.instance
    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
      "OLD_PRIMARY": source_node,
      "OLD_SECONDARY": target_node,
      "NEW_PRIMARY": target_node,
      "NEW_SECONDARY": source_node,
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    nl_post = list(nl)
    nl_post.append(source_node)
    return env, nl, nl_post

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.",
                                 errors.ECODE_STATE)

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    if instance.admin_up:
      feedback_fn("* checking disk consistency between source and target")
      for dev in instance.disks:
        # for drbd, these are drbd over lvm
        if not _CheckDiskConsistency(self, dev, target_node, False):
          if not self.op.ignore_consistency:
            raise errors.OpExecError("Disk %s is degraded on target node,"
                                     " aborting failover." % dev.iv_name)
    else:
      feedback_fn("* not checking disk consistency as instance is not running")

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, feedback_fn)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
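
# Editor's illustrative sketch: the failover and migration hooks environments
# above describe the same role swap, with "source" being the current primary
# and "target" its single mirrored secondary. This helper is hypothetical and
# is not used by any LU in this module; it only restates the OLD_*/NEW_*
# variables exported to the hooks.
def _ExampleRoleSwapEnv(source_node, target_node):
  """Return the node-role part of the failover/migrate hooks env (sketch)."""
  return {
    "OLD_PRIMARY": source_node,
    "OLD_SECONDARY": target_node,
    "NEW_PRIMARY": target_node,
    "NEW_SECONDARY": source_node,
    }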


class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self._migrater = TLMigrateInstance(self, self.op.instance_name,
                                       self.op.live, self.op.cleanup)
    self.tasklets = [self._migrater]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    instance = self._migrater.instance
    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]
    env = _BuildInstanceHookEnvByObject(self, instance)
    env["MIGRATE_LIVE"] = self.op.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    env.update({
        "OLD_PRIMARY": source_node,
        "OLD_SECONDARY": target_node,
        "NEW_PRIMARY": target_node,
        "NEW_SECONDARY": source_node,
        })
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
    nl_post = list(nl)
    nl_post.append(source_node)
    return env, nl, nl_post
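
# Editor's illustrative sketch of the tasklet pattern used by LUMigrateInstance
# above and LUMigrateNode below. The assumption (stated here, not verified in
# this excerpt) is that when an LU sets self.tasklets, the framework runs
# CheckPrereq and Exec of every tasklet in order instead of the LU's own
# methods. The class below is hypothetical and unused.
class _ExampleNoopTasklet(Tasklet):
  """Tasklet that only reports its parameters (sketch, not used)."""
  def __init__(self, lu, instance_name):
    Tasklet.__init__(self, lu)
    self.instance_name = instance_name

  def CheckPrereq(self):
    pass

  def Exec(self, feedback_fn):
    feedback_fn("Would process instance %s" % self.instance_name)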


class LUMoveInstance(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
                                       self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)

    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
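
# Editor's illustrative sketch of the copy-with-rollback flow in
# LUMoveInstance.Exec above: copy each disk in order, stop at the first
# failure and collect the error message, and let the caller roll back when
# anything went wrong. The copy_fn callable is a hypothetical stand-in for
# the blockdev_assemble/blockdev_export RPC pair; this helper is not used by
# any LU.
def _ExampleCopyDisksOrCollectErrors(disks, copy_fn):
  """Try to copy every disk; return the list of error messages (sketch)."""
  errs = []
  for idx, disk in enumerate(disks):
    error = copy_fn(idx, disk)
    if error:
      errs.append(error)
      break
  return errs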


class LUMigrateNode(LogicalUnit):
  """Migrate all instances from a node.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name", "live"]
  REQ_BGL = False

  def ExpandNames(self):
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    # Create tasklets for migrating all instances on this node
    names = []
    tasklets = []

    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
      logging.debug("Migrating instance %s", inst.name)
      names.append(inst.name)

      tasklets.append(TLMigrateInstance(self, inst.name, self.op.live, False))

    self.tasklets = tasklets

    # Declare instance locks
    self.needed_locks[locking.LEVEL_INSTANCE] = names

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      }

    nl = [self.cfg.GetMasterNode()]

    return (env, nl, nl)
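
# Editor's illustrative sketch: how the LUs above build the migration tasklet
# defined below. The defaults and the wrapper itself are hypothetical; only
# the parameter meaning is taken from the surrounding code (live=False asks
# for a non-live migration, cleanup=True asks the tasklet to recover from a
# previously failed migration instead of starting a new one, as
# LUMigrateInstance does with self.op.cleanup).
def _ExampleMigrationTasklet(lu, instance_name, live=True, cleanup=False):
  """Build a TLMigrateInstance the way the LUs above do (sketch)."""
  return TLMigrateInstance(lu, instance_name, live, cleanup)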


class TLMigrateInstance(Tasklet):
  def __init__(self, lu, instance_name, live, cleanup):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.live = live
    self.cleanup = cleanup

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
    instance = self.cfg.GetInstanceInfo(instance_name)
    assert instance is not None

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

    if not self.cleanup:
      _CheckNodeNotDrained(self, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      result.Raise("Can't migrate, please use failover",
                   prereq=True, ecode=errors.ECODE_STATE)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)
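
  # Example (editor's illustration, values made up): if the per-node payloads
  # from call_drbd_wait_sync above were
  #   node1 -> (True, 100) and node2 -> (False, 87.5)
  # then all_done becomes False, min_percent becomes 87.5 and the loop prints
  # "   - progress: 87.5%" before sleeping and polling again.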

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks on node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)
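
  # Editor's note (illustration only): the three helpers above are composed
  # in a fixed pattern by the methods below: demote one side with
  # _EnsureSecondary, drop the network with _GoStandalone, reattach with
  # _GoReconnect (dual-master only while the live migration itself runs), and
  # then _WaitUntilSync until DRBD reports both sides in sync again.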

  def _ExecCleanup(self):
    """Try to clean up after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance, self.feedback_fn)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore errors here, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")
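
  # Editor's illustration of the decision above (made-up hostnames): with
  # source_node "node1" and target_node "node2",
  #   running on node1 and node2 -> error, manual intervention required
  #   running on neither         -> error, stop/restart the instance instead
  #   running on node2 only      -> config is updated, node1 gets demoted
  #   running on node1 only      -> config untouched, node2 gets demoted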

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.lu.LogWarning("Migration failed and I can't reconnect the"
                         " drives: error '%s'\n"
                         "Please look and recover the instance status" %
                         str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.
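
  # Editor's note (inferred only from the two call sites in this class):
  # call_finalize_migration gets False as its last argument here to abort the
  # started migration on the target node, and True in _ExecMigration below
  # once the migration has actually succeeded.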

  def _ExecMigration(self):
    """Migrate an instance.

    The migration is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migration." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Pre-migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self.feedback_fn("Migration failed, aborting")
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s", msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    """
    feedback_fn("Migrating instance %s" % self.instance.name)

    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
5660 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
5661 53c776b5 Iustin Pop
      }
5662 3e06e001 Michael Hanselmann
5663 3e06e001 Michael Hanselmann
    if self.cleanup:
5664 53c776b5 Iustin Pop
      return self._ExecCleanup()
5665 53c776b5 Iustin Pop
    else:
5666 53c776b5 Iustin Pop
      return self._ExecMigration()
5667 53c776b5 Iustin Pop
5668 53c776b5 Iustin Pop
5669 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
5670 428958aa Iustin Pop
                    info, force_open):
5671 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
5672 a8083063 Iustin Pop

5673 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
5674 a8083063 Iustin Pop
  all its children.
5675 a8083063 Iustin Pop

5676 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
5677 a8083063 Iustin Pop

5678 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
5679 428958aa Iustin Pop
  @param node: the node on which to create the device
5680 428958aa Iustin Pop
  @type instance: L{objects.Instance}
5681 428958aa Iustin Pop
  @param instance: the instance which owns the device
5682 428958aa Iustin Pop
  @type device: L{objects.Disk}
5683 428958aa Iustin Pop
  @param device: the device to create
5684 428958aa Iustin Pop
  @type force_create: boolean
5685 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
5686 428958aa Iustin Pop
      will be changed to True whenever we find a device which has the
5687 428958aa Iustin Pop
      CreateOnSecondary() attribute
5688 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5689 428958aa Iustin Pop
      (this will be represented as a LVM tag)
5690 428958aa Iustin Pop
  @type force_open: boolean
5691 428958aa Iustin Pop
  @param force_open: this parameter will be passed to the
5692 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5693 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
5694 428958aa Iustin Pop
      the child assembly and the device's own Open() execution
5695 428958aa Iustin Pop

5696 a8083063 Iustin Pop
  """
5697 a8083063 Iustin Pop
  if device.CreateOnSecondary():
5698 428958aa Iustin Pop
    force_create = True
5699 796cab27 Iustin Pop
5700 a8083063 Iustin Pop
  if device.children:
5701 a8083063 Iustin Pop
    for child in device.children:
5702 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
5703 428958aa Iustin Pop
                      info, force_open)
5704 a8083063 Iustin Pop
5705 428958aa Iustin Pop
  if not force_create:
5706 796cab27 Iustin Pop
    return
5707 796cab27 Iustin Pop
5708 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
5709 de12473a Iustin Pop
5710 de12473a Iustin Pop
5711 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
5712 de12473a Iustin Pop
  """Create a single block device on a given node.
5713 de12473a Iustin Pop

5714 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
5715 de12473a Iustin Pop
  created in advance.
5716 de12473a Iustin Pop

5717 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
5718 de12473a Iustin Pop
  @param node: the node on which to create the device
5719 de12473a Iustin Pop
  @type instance: L{objects.Instance}
5720 de12473a Iustin Pop
  @param instance: the instance which owns the device
5721 de12473a Iustin Pop
  @type device: L{objects.Disk}
5722 de12473a Iustin Pop
  @param device: the device to create
5723 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5724 de12473a Iustin Pop
      (this will be represented as a LVM tag)
5725 de12473a Iustin Pop
  @type force_open: boolean
5726 de12473a Iustin Pop
  @param force_open: this parameter will be passes to the
5727 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5728 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
5729 de12473a Iustin Pop
      the child assembly and the device own Open() execution
5730 de12473a Iustin Pop

5731 de12473a Iustin Pop
  """
5732 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
5733 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
5734 428958aa Iustin Pop
                                       instance.name, force_open, info)
5735 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
5736 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
5737 a8083063 Iustin Pop
  if device.physical_id is None:
5738 0959c824 Iustin Pop
    device.physical_id = result.payload
5739 a8083063 Iustin Pop
5740 a8083063 Iustin Pop
5741 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
5742 923b1523 Iustin Pop
  """Generate a suitable LV name.
5743 923b1523 Iustin Pop

5744 923b1523 Iustin Pop
  This will generate one logical volume name for each of the given suffixes.
5745 923b1523 Iustin Pop

5746 923b1523 Iustin Pop
  """
5747 923b1523 Iustin Pop
  results = []
5748 923b1523 Iustin Pop
  for val in exts:
5749 4fae38c5 Guido Trotter
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
5750 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
5751 923b1523 Iustin Pop
  return results
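# Illustrative result (sketch): a call such as
#   _GenerateUniqueNames(lu, [".disk0", ".disk1"])
# returns something like
#   ["<unique-id-1>.disk0", "<unique-id-2>.disk1"]
# where each name gets its own identifier freshly reserved in the cluster
# configuration (the exact format is whatever cfg.GenerateUniqueID returns).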
5752 923b1523 Iustin Pop
5753 923b1523 Iustin Pop
5754 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
5755 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
5756 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
5757 a1f445d3 Iustin Pop

5758 a1f445d3 Iustin Pop
  """
5759 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
5760 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5761 afa1386e Guido Trotter
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
5762 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5763 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
5764 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5765 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
5766 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
5767 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
5768 f9518d38 Iustin Pop
                                      p_minor, s_minor,
5769 f9518d38 Iustin Pop
                                      shared_secret),
5770 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
5771 a1f445d3 Iustin Pop
                          iv_name=iv_name)
5772 a1f445d3 Iustin Pop
  return drbd_dev
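# Illustrative result (sketch, hypothetical arguments): a call such as
#   _GenerateDRBD8Branch(lu, "node1", "node2", 1024,
#                        ["x.disk0_data", "x.disk0_meta"], "disk/0", 12, 13)
# returns an LD_DRBD8 disk of size 1024 whose logical_id is
# (primary, secondary, allocated port, 12, 13, generated secret) and whose
# two LD_LV children are the 1024 MB data volume and the 128 MB metadata
# volume named after the two entries of "names".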
5773 a1f445d3 Iustin Pop
5774 7c0d6283 Michael Hanselmann
5775 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
5776 a8083063 Iustin Pop
                          instance_name, primary_node,
5777 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
5778 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
5779 e2a65344 Iustin Pop
                          base_index):
5780 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
5781 a8083063 Iustin Pop

5782 a8083063 Iustin Pop
  """
5783 a8083063 Iustin Pop
  # TODO: compute space requirements
5784 a8083063 Iustin Pop
5785 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5786 08db7c5c Iustin Pop
  disk_count = len(disk_info)
5787 08db7c5c Iustin Pop
  disks = []
5788 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
5789 08db7c5c Iustin Pop
    pass
5790 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
5791 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
5792 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5793 923b1523 Iustin Pop
5794 fb4b324b Guido Trotter
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5795 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
5796 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5797 e2a65344 Iustin Pop
      disk_index = idx + base_index
5798 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
5799 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
5800 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
5801 6ec66eae Iustin Pop
                              mode=disk["mode"])
5802 08db7c5c Iustin Pop
      disks.append(disk_dev)
5803 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
5804 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
5805 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5806 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
5807 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
5808 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
5809 08db7c5c Iustin Pop
5810 e6c1ff2f Iustin Pop
    names = []
5811 fb4b324b Guido Trotter
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5812 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
5813 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
5814 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
5815 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5816 112050d9 Iustin Pop
      disk_index = idx + base_index
5817 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
5818 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
5819 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
5820 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
5821 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
5822 08db7c5c Iustin Pop
      disks.append(disk_dev)
5823 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
5824 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
5825 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
5826 0f1a06e3 Manuel Franceschini
5827 0e3baaf3 Iustin Pop
    _RequireFileStorage()
5828 0e3baaf3 Iustin Pop
5829 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5830 112050d9 Iustin Pop
      disk_index = idx + base_index
5831 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
5832 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
5833 08db7c5c Iustin Pop
                              logical_id=(file_driver,
5834 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
5835 43e99cff Guido Trotter
                                                         disk_index)),
5836 6ec66eae Iustin Pop
                              mode=disk["mode"])
5837 08db7c5c Iustin Pop
      disks.append(disk_dev)
5838 a8083063 Iustin Pop
  else:
5839 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
5840 a8083063 Iustin Pop
  return disks
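# Illustrative result (sketch, hypothetical values): for
#   template_name=constants.DT_PLAIN, base_index=0, secondary_nodes=[],
#   disk_info=[{"size": 1024, "mode": constants.DISK_RDWR}]
# this returns a single LD_LV disk with iv_name "disk/0" and a logical_id
# of (<vg name>, "<unique-id>.disk0"); with DT_DRBD8 each entry instead
# becomes the DRBD tree built by _GenerateDRBD8Branch above, using minors
# allocated via cfg.AllocateDRBDMinor.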
5841 a8083063 Iustin Pop
5842 a8083063 Iustin Pop
5843 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
5844 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
5845 3ecf6786 Iustin Pop

5846 3ecf6786 Iustin Pop
  """
5847 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
5848 a0c3fea1 Michael Hanselmann
5849 a0c3fea1 Michael Hanselmann
5850 621b7678 Iustin Pop
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
5851 a8083063 Iustin Pop
  """Create all disks for an instance.
5852 a8083063 Iustin Pop

5853 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
5854 a8083063 Iustin Pop

5855 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5856 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5857 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5858 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
5859 bd315bfa Iustin Pop
  @type to_skip: list
5860 bd315bfa Iustin Pop
  @param to_skip: list of indices to skip
5861 621b7678 Iustin Pop
  @type target_node: string
5862 621b7678 Iustin Pop
  @param target_node: if passed, overrides the target node for creation
5863 e4376078 Iustin Pop
  @rtype: boolean
5864 e4376078 Iustin Pop
  @return: the success of the creation
5865 a8083063 Iustin Pop

5866 a8083063 Iustin Pop
  """
5867 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
5868 621b7678 Iustin Pop
  if target_node is None:
5869 621b7678 Iustin Pop
    pnode = instance.primary_node
5870 621b7678 Iustin Pop
    all_nodes = instance.all_nodes
5871 621b7678 Iustin Pop
  else:
5872 621b7678 Iustin Pop
    pnode = target_node
5873 621b7678 Iustin Pop
    all_nodes = [pnode]
5874 a0c3fea1 Michael Hanselmann
5875 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5876 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5877 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
5878 0f1a06e3 Manuel Franceschini
5879 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
5880 9b4127eb Guido Trotter
                 " node %s" % (file_storage_dir, pnode))
5881 0f1a06e3 Manuel Franceschini
5882 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
5883 24991749 Iustin Pop
  # LUSetInstanceParams
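  # Illustrative flag handling (sketch): for every disk the loop below
  # passes f_create both as force_create and as force_open, so devices on
  # the primary node (node == pnode) are forced and opened, while on the
  # other nodes creation is left to the CreateOnSecondary() logic inside
  # _CreateBlockDev.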
5884 bd315bfa Iustin Pop
  for idx, device in enumerate(instance.disks):
5885 bd315bfa Iustin Pop
    if to_skip and idx in to_skip:
5886 bd315bfa Iustin Pop
      continue
5887 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
5888 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
5889 a8083063 Iustin Pop
    #HARDCODE
5890 621b7678 Iustin Pop
    for node in all_nodes:
5891 428958aa Iustin Pop
      f_create = node == pnode
5892 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
5893 a8083063 Iustin Pop
5894 a8083063 Iustin Pop
5895 621b7678 Iustin Pop
def _RemoveDisks(lu, instance, target_node=None):
5896 a8083063 Iustin Pop
  """Remove all disks for an instance.
5897 a8083063 Iustin Pop

5898 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
5899 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
5900 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
5901 a8083063 Iustin Pop
  with `_CreateDisks()`).
5902 a8083063 Iustin Pop

5903 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5904 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5905 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5906 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
5907 621b7678 Iustin Pop
  @type target_node: string
5908 621b7678 Iustin Pop
  @param target_node: used to override the node on which to remove the disks
5909 e4376078 Iustin Pop
  @rtype: boolean
5910 e4376078 Iustin Pop
  @return: the success of the removal
5911 a8083063 Iustin Pop

5912 a8083063 Iustin Pop
  """
5913 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
5914 a8083063 Iustin Pop
5915 e1bc0878 Iustin Pop
  all_result = True
5916 a8083063 Iustin Pop
  for device in instance.disks:
5917 621b7678 Iustin Pop
    if target_node:
5918 621b7678 Iustin Pop
      edata = [(target_node, device)]
5919 621b7678 Iustin Pop
    else:
5920 621b7678 Iustin Pop
      edata = device.ComputeNodeTree(instance.primary_node)
5921 621b7678 Iustin Pop
    for node, disk in edata:
5922 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
5923 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
5924 e1bc0878 Iustin Pop
      if msg:
5925 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
5926 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
5927 e1bc0878 Iustin Pop
        all_result = False
5928 0f1a06e3 Manuel Franceschini
5929 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5930 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5931 dfc2a24c Guido Trotter
    if target_node:
5932 dfc2a24c Guido Trotter
      tgt = target_node
5933 621b7678 Iustin Pop
    else:
5934 dfc2a24c Guido Trotter
      tgt = instance.primary_node
5935 621b7678 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
5936 621b7678 Iustin Pop
    if result.fail_msg:
5937 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
5938 621b7678 Iustin Pop
                    file_storage_dir, tgt, result.fail_msg)
5939 e1bc0878 Iustin Pop
      all_result = False
5940 0f1a06e3 Manuel Franceschini
5941 e1bc0878 Iustin Pop
  return all_result
5942 a8083063 Iustin Pop
5943 a8083063 Iustin Pop
5944 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
5945 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
5946 e2fe6369 Iustin Pop

5947 e2fe6369 Iustin Pop
  """
5948 e2fe6369 Iustin Pop
  # Required free disk space as a function of the disk template and disks
5949 e2fe6369 Iustin Pop
  req_size_dict = {
5950 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
5951 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
5952 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
5953 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
5954 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
5955 e2fe6369 Iustin Pop
  }
5956 e2fe6369 Iustin Pop
5957 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
5958 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
5959 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
5960 e2fe6369 Iustin Pop
5961 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
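# Illustrative arithmetic (sketch, hypothetical sizes): for
#   disks = [{"size": 1024}, {"size": 2048}]
# this returns 3072 for DT_PLAIN, 3072 + 2 * 128 = 3328 for DT_DRBD8 (one
# 128 MB metadata volume per disk) and None for DT_DISKLESS/DT_FILE, which
# do not consume space in the volume group.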
5962 e2fe6369 Iustin Pop
5963 e2fe6369 Iustin Pop
5964 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
5965 74409b12 Iustin Pop
  """Hypervisor parameter validation.
5966 74409b12 Iustin Pop

5967 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
5968 74409b12 Iustin Pop
  used in both instance create and instance modify.
5969 74409b12 Iustin Pop

5970 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
5971 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
5972 74409b12 Iustin Pop
  @type nodenames: list
5973 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
5974 74409b12 Iustin Pop
  @type hvname: string
5975 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
5976 74409b12 Iustin Pop
  @type hvparams: dict
5977 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
5978 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
5979 74409b12 Iustin Pop

5980 74409b12 Iustin Pop
  """
5981 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
5982 74409b12 Iustin Pop
                                                  hvname,
5983 74409b12 Iustin Pop
                                                  hvparams)
5984 74409b12 Iustin Pop
  for node in nodenames:
5985 781de953 Iustin Pop
    info = hvinfo[node]
5986 68c6f21c Iustin Pop
    if info.offline:
5987 68c6f21c Iustin Pop
      continue
5988 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
5989 74409b12 Iustin Pop
5990 74409b12 Iustin Pop
5991 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
5992 a8083063 Iustin Pop
  """Create an instance.
5993 a8083063 Iustin Pop

5994 a8083063 Iustin Pop
  """
5995 a8083063 Iustin Pop
  HPATH = "instance-add"
5996 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5997 f276c4b5 Iustin Pop
  _OP_REQP = ["instance_name", "disks",
5998 08db7c5c Iustin Pop
              "mode", "start",
5999 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
6000 338e51e8 Iustin Pop
              "hvparams", "beparams"]
6001 7baf741d Guido Trotter
  REQ_BGL = False
6002 7baf741d Guido Trotter
6003 5f23e043 Iustin Pop
  def CheckArguments(self):
6004 5f23e043 Iustin Pop
    """Check arguments.
6005 5f23e043 Iustin Pop

6006 5f23e043 Iustin Pop
    """
6007 df4272e5 Iustin Pop
    # set optional parameters to None if they don't exist
6008 f276c4b5 Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor",
6009 e588764d Iustin Pop
                 "disk_template", "identify_defaults"]:
6010 df4272e5 Iustin Pop
      if not hasattr(self.op, attr):
6011 df4272e5 Iustin Pop
        setattr(self.op, attr, None)
6012 df4272e5 Iustin Pop
6013 5f23e043 Iustin Pop
    # do not require name_check to ease forward/backward compatibility
6014 5f23e043 Iustin Pop
    # for tools
6015 5f23e043 Iustin Pop
    if not hasattr(self.op, "name_check"):
6016 5f23e043 Iustin Pop
      self.op.name_check = True
6017 25a8792c Iustin Pop
    if not hasattr(self.op, "no_install"):
6018 25a8792c Iustin Pop
      self.op.no_install = False
6019 25a8792c Iustin Pop
    if self.op.no_install and self.op.start:
6020 25a8792c Iustin Pop
      self.LogInfo("No-installation mode selected, disabling startup")
6021 25a8792c Iustin Pop
      self.op.start = False
6022 44caf5a8 Iustin Pop
    # validate/normalize the instance name
6023 44caf5a8 Iustin Pop
    self.op.instance_name = utils.HostInfo.NormalizeName(self.op.instance_name)
6024 5f23e043 Iustin Pop
    if self.op.ip_check and not self.op.name_check:
6025 5f23e043 Iustin Pop
      # TODO: make the ip check more flexible and not depend on the name check
6026 5f23e043 Iustin Pop
      raise errors.OpPrereqError("Cannot do ip checks without a name check",
6027 5f23e043 Iustin Pop
                                 errors.ECODE_INVAL)
6028 c3589cf8 Iustin Pop
    # check disk information: either all adopt, or no adopt
6029 c3589cf8 Iustin Pop
    has_adopt = has_no_adopt = False
6030 c3589cf8 Iustin Pop
    for disk in self.op.disks:
6031 c3589cf8 Iustin Pop
      if "adopt" in disk:
6032 c3589cf8 Iustin Pop
        has_adopt = True
6033 c3589cf8 Iustin Pop
      else:
6034 c3589cf8 Iustin Pop
        has_no_adopt = True
6035 c3589cf8 Iustin Pop
    if has_adopt and has_no_adopt:
6036 417eabe2 Iustin Pop
      raise errors.OpPrereqError("Either all disks are adopted or none is",
6037 c3589cf8 Iustin Pop
                                 errors.ECODE_INVAL)
6038 c3589cf8 Iustin Pop
    if has_adopt:
6039 c3589cf8 Iustin Pop
      if self.op.disk_template != constants.DT_PLAIN:
6040 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption is only supported for the"
6041 c3589cf8 Iustin Pop
                                   " 'plain' disk template",
6042 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6043 c3589cf8 Iustin Pop
      if self.op.iallocator is not None:
6044 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption not allowed with an"
6045 c3589cf8 Iustin Pop
                                   " iallocator script", errors.ECODE_INVAL)
6046 c3589cf8 Iustin Pop
      if self.op.mode == constants.INSTANCE_IMPORT:
6047 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption not allowed for"
6048 c3589cf8 Iustin Pop
                                   " instance import", errors.ECODE_INVAL)
6049 c3589cf8 Iustin Pop
6050 c3589cf8 Iustin Pop
    self.adopt_disks = has_adopt
6051 5f23e043 Iustin Pop
6052 417eabe2 Iustin Pop
    # verify creation mode
6053 417eabe2 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
6054 417eabe2 Iustin Pop
                            constants.INSTANCE_IMPORT):
6055 417eabe2 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
6056 417eabe2 Iustin Pop
                                 self.op.mode, errors.ECODE_INVAL)
6057 417eabe2 Iustin Pop
6058 417eabe2 Iustin Pop
    # instance name verification
6059 417eabe2 Iustin Pop
    if self.op.name_check:
6060 417eabe2 Iustin Pop
      self.hostname1 = utils.GetHostInfo(self.op.instance_name)
6061 417eabe2 Iustin Pop
      self.op.instance_name = self.hostname1.name
6062 417eabe2 Iustin Pop
      # used in CheckPrereq for ip ping check
6063 417eabe2 Iustin Pop
      self.check_ip = self.hostname1.ip
6064 417eabe2 Iustin Pop
    else:
6065 417eabe2 Iustin Pop
      self.check_ip = None
6066 417eabe2 Iustin Pop
6067 417eabe2 Iustin Pop
    # file storage checks
6068 417eabe2 Iustin Pop
    if (self.op.file_driver and
6069 417eabe2 Iustin Pop
        not self.op.file_driver in constants.FILE_DRIVER):
6070 417eabe2 Iustin Pop
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
6071 417eabe2 Iustin Pop
                                 self.op.file_driver, errors.ECODE_INVAL)
6072 417eabe2 Iustin Pop
6073 417eabe2 Iustin Pop
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
6074 417eabe2 Iustin Pop
      raise errors.OpPrereqError("File storage directory path not absolute",
6075 417eabe2 Iustin Pop
                                 " absolute", errors.ECODE_INVAL)
6076 417eabe2 Iustin Pop
6077 417eabe2 Iustin Pop
    ### Node/iallocator related checks
6078 417eabe2 Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
6079 417eabe2 Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
6080 417eabe2 Iustin Pop
                                 " node must be given",
6081 417eabe2 Iustin Pop
                                 errors.ECODE_INVAL)
6082 417eabe2 Iustin Pop
6083 417eabe2 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6084 417eabe2 Iustin Pop
      # On import force_variant must be True, because if we forced it at
6085 417eabe2 Iustin Pop
      # initial install, our only chance when importing it back is that it
6086 417eabe2 Iustin Pop
      # works again!
6087 417eabe2 Iustin Pop
      self.op.force_variant = True
6088 417eabe2 Iustin Pop
6089 417eabe2 Iustin Pop
      if self.op.no_install:
6090 417eabe2 Iustin Pop
        self.LogInfo("No-installation mode has no effect during import")
6091 417eabe2 Iustin Pop
6092 417eabe2 Iustin Pop
    else: # INSTANCE_CREATE
6093 417eabe2 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
6094 417eabe2 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified",
6095 417eabe2 Iustin Pop
                                   errors.ECODE_INVAL)
6096 417eabe2 Iustin Pop
      self.op.force_variant = getattr(self.op, "force_variant", False)
6097 f276c4b5 Iustin Pop
      if self.op.disk_template is None:
6098 f276c4b5 Iustin Pop
        raise errors.OpPrereqError("No disk template specified",
6099 f276c4b5 Iustin Pop
                                   errors.ECODE_INVAL)
6100 417eabe2 Iustin Pop
6101 7baf741d Guido Trotter
  def ExpandNames(self):
6102 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
6103 7baf741d Guido Trotter

6104 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
6105 7baf741d Guido Trotter

6106 7baf741d Guido Trotter
    """
6107 7baf741d Guido Trotter
    self.needed_locks = {}
6108 7baf741d Guido Trotter
6109 417eabe2 Iustin Pop
    instance_name = self.op.instance_name
6110 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
6111 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
6112 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
6113 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
6114 5c983ee5 Iustin Pop
                                 instance_name, errors.ECODE_EXISTS)
6115 7baf741d Guido Trotter
6116 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
6117 7baf741d Guido Trotter
6118 7baf741d Guido Trotter
    if self.op.iallocator:
6119 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6120 7baf741d Guido Trotter
    else:
6121 cf26a87a Iustin Pop
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
6122 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
6123 7baf741d Guido Trotter
      if self.op.snode is not None:
6124 cf26a87a Iustin Pop
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
6125 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
6126 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
6127 7baf741d Guido Trotter
6128 7baf741d Guido Trotter
    # in case of import lock the source node too
6129 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
6130 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
6131 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
6132 7baf741d Guido Trotter
6133 b9322a9f Guido Trotter
      if src_path is None:
6134 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
6135 b9322a9f Guido Trotter
6136 b9322a9f Guido Trotter
      if src_node is None:
6137 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6138 b9322a9f Guido Trotter
        self.op.src_node = None
6139 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
6140 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
6141 5c983ee5 Iustin Pop
                                     " path requires a source node option.",
6142 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
6143 b9322a9f Guido Trotter
      else:
6144 cf26a87a Iustin Pop
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
6145 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
6146 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
6147 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
6148 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
6149 c4feafe8 Iustin Pop
            utils.PathJoin(constants.EXPORT_DIR, src_path)
6150 7baf741d Guido Trotter
6151 538475ca Iustin Pop
  def _RunAllocator(self):
6152 538475ca Iustin Pop
    """Run the allocator based on input opcode.
6153 538475ca Iustin Pop

6154 538475ca Iustin Pop
    """
6155 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
6156 923ddac0 Michael Hanselmann
    ial = IAllocator(self.cfg, self.rpc,
6157 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
6158 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
6159 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
6160 d1c2dd75 Iustin Pop
                     tags=[],
6161 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
6162 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
6163 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
6164 08db7c5c Iustin Pop
                     disks=self.disks,
6165 d1c2dd75 Iustin Pop
                     nics=nics,
6166 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
6167 29859cb7 Iustin Pop
                     )
6168 d1c2dd75 Iustin Pop
6169 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
6170 d1c2dd75 Iustin Pop
6171 d1c2dd75 Iustin Pop
    if not ial.success:
6172 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
6173 5c983ee5 Iustin Pop
                                 " iallocator '%s': %s" %
6174 5c983ee5 Iustin Pop
                                 (self.op.iallocator, ial.info),
6175 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
6176 680f0a89 Iustin Pop
    if len(ial.result) != ial.required_nodes:
6177 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6178 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
6179 680f0a89 Iustin Pop
                                 (self.op.iallocator, len(ial.result),
6180 5c983ee5 Iustin Pop
                                  ial.required_nodes), errors.ECODE_FAULT)
6181 680f0a89 Iustin Pop
    self.op.pnode = ial.result[0]
6182 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
6183 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
6184 680f0a89 Iustin Pop
                 utils.CommaJoin(ial.result))
6185 27579978 Iustin Pop
    if ial.required_nodes == 2:
6186 680f0a89 Iustin Pop
      self.op.snode = ial.result[1]
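    # Illustrative outcome (sketch, hypothetical node names): for a DRBD8
    # request ial.required_nodes is 2 and ial.result looks like
    #   ["node1.example.com", "node2.example.com"]
    # so pnode and snode above end up as its first and second entries.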
6187 538475ca Iustin Pop
6188 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6189 a8083063 Iustin Pop
    """Build hooks env.
6190 a8083063 Iustin Pop

6191 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
6192 a8083063 Iustin Pop

6193 a8083063 Iustin Pop
    """
6194 a8083063 Iustin Pop
    env = {
6195 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
6196 a8083063 Iustin Pop
      }
6197 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6198 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
6199 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
6200 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
6201 396e1b78 Michael Hanselmann
6202 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
6203 2c2690c9 Iustin Pop
      name=self.op.instance_name,
6204 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
6205 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
6206 4978db17 Iustin Pop
      status=self.op.start,
6207 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
6208 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
6209 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
6210 f9b10246 Guido Trotter
      nics=_NICListToTuple(self, self.nics),
6211 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
6212 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
6213 67fc3042 Iustin Pop
      bep=self.be_full,
6214 67fc3042 Iustin Pop
      hvp=self.hv_full,
6215 3df6e710 Iustin Pop
      hypervisor_name=self.op.hypervisor,
6216 396e1b78 Michael Hanselmann
    ))
6217 a8083063 Iustin Pop
6218 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
6219 a8083063 Iustin Pop
          self.secondaries)
6220 a8083063 Iustin Pop
    return env, nl, nl
6221 a8083063 Iustin Pop
6222 c1c31426 Iustin Pop
  def _ReadExportInfo(self):
6223 c1c31426 Iustin Pop
    """Reads the export information from disk.
6224 c1c31426 Iustin Pop

6225 c1c31426 Iustin Pop
    It will override the opcode source node and path with the actual
6226 c1c31426 Iustin Pop
    information, if these two were not specified before.
6227 c1c31426 Iustin Pop

6228 c1c31426 Iustin Pop
    @return: the export information
6229 c1c31426 Iustin Pop

6230 c1c31426 Iustin Pop
    """
6231 c1c31426 Iustin Pop
    assert self.op.mode == constants.INSTANCE_IMPORT
6232 c1c31426 Iustin Pop
6233 c1c31426 Iustin Pop
    src_node = self.op.src_node
6234 c1c31426 Iustin Pop
    src_path = self.op.src_path
6235 c1c31426 Iustin Pop
6236 c1c31426 Iustin Pop
    if src_node is None:
6237 c1c31426 Iustin Pop
      locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6238 c1c31426 Iustin Pop
      exp_list = self.rpc.call_export_list(locked_nodes)
6239 c1c31426 Iustin Pop
      found = False
6240 c1c31426 Iustin Pop
      for node in exp_list:
6241 c1c31426 Iustin Pop
        if exp_list[node].fail_msg:
6242 c1c31426 Iustin Pop
          continue
6243 c1c31426 Iustin Pop
        if src_path in exp_list[node].payload:
6244 c1c31426 Iustin Pop
          found = True
6245 c1c31426 Iustin Pop
          self.op.src_node = src_node = node
6246 c1c31426 Iustin Pop
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
6247 c1c31426 Iustin Pop
                                                       src_path)
6248 c1c31426 Iustin Pop
          break
6249 c1c31426 Iustin Pop
      if not found:
6250 c1c31426 Iustin Pop
        raise errors.OpPrereqError("No export found for relative path %s" %
6251 c1c31426 Iustin Pop
                                    src_path, errors.ECODE_INVAL)
6252 c1c31426 Iustin Pop
6253 c1c31426 Iustin Pop
    _CheckNodeOnline(self, src_node)
6254 c1c31426 Iustin Pop
    result = self.rpc.call_export_info(src_node, src_path)
6255 c1c31426 Iustin Pop
    result.Raise("No export or invalid export found in dir %s" % src_path)
6256 c1c31426 Iustin Pop
6257 c1c31426 Iustin Pop
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
6258 c1c31426 Iustin Pop
    if not export_info.has_section(constants.INISECT_EXP):
6259 c1c31426 Iustin Pop
      raise errors.ProgrammerError("Corrupted export config",
6260 c1c31426 Iustin Pop
                                   errors.ECODE_ENVIRON)
6261 c1c31426 Iustin Pop
6262 c1c31426 Iustin Pop
    ei_version = export_info.get(constants.INISECT_EXP, "version")
6263 c1c31426 Iustin Pop
    if (int(ei_version) != constants.EXPORT_VERSION):
6264 c1c31426 Iustin Pop
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
6265 c1c31426 Iustin Pop
                                 (ei_version, constants.EXPORT_VERSION),
6266 c1c31426 Iustin Pop
                                 errors.ECODE_ENVIRON)
6267 c1c31426 Iustin Pop
    return export_info
6268 a8083063 Iustin Pop
6269 f276c4b5 Iustin Pop
  def _ReadExportParams(self, einfo):
6270 f276c4b5 Iustin Pop
    """Use export parameters as defaults.
6271 f276c4b5 Iustin Pop

6272 f276c4b5 Iustin Pop
    In case the opcode doesn't specify (as in override) some instance
6273 f276c4b5 Iustin Pop
    parameters, then try to use them from the export information, if
6274 f276c4b5 Iustin Pop
    that declares them.
6275 f276c4b5 Iustin Pop

6276 f276c4b5 Iustin Pop
    """
6277 b6cd72b2 Iustin Pop
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
6278 b6cd72b2 Iustin Pop
6279 f276c4b5 Iustin Pop
    if self.op.disk_template is None:
6280 f276c4b5 Iustin Pop
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
6281 f276c4b5 Iustin Pop
        self.op.disk_template = einfo.get(constants.INISECT_INS,
6282 f276c4b5 Iustin Pop
                                          "disk_template")
6283 f276c4b5 Iustin Pop
      else:
6284 f276c4b5 Iustin Pop
        raise errors.OpPrereqError("No disk template specified and the export"
6285 f276c4b5 Iustin Pop
                                   " is missing the disk_template information",
6286 f276c4b5 Iustin Pop
                                   errors.ECODE_INVAL)
6287 f276c4b5 Iustin Pop
6288 9b12ed0f Iustin Pop
    if not self.op.disks:
6289 9b12ed0f Iustin Pop
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
6290 9b12ed0f Iustin Pop
        disks = []
6291 9b12ed0f Iustin Pop
        # TODO: import the disk iv_name too
6292 9b12ed0f Iustin Pop
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
6293 9b12ed0f Iustin Pop
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
6294 9b12ed0f Iustin Pop
          disks.append({"size": disk_sz})
6295 9b12ed0f Iustin Pop
        self.op.disks = disks
6296 9b12ed0f Iustin Pop
      else:
6297 9b12ed0f Iustin Pop
        raise errors.OpPrereqError("No disk info specified and the export"
6298 9b12ed0f Iustin Pop
                                   " is missing the disk information",
6299 9b12ed0f Iustin Pop
                                   errors.ECODE_INVAL)
6300 9b12ed0f Iustin Pop
6301 0af0f641 Iustin Pop
    if (not self.op.nics and
6302 0af0f641 Iustin Pop
        einfo.has_option(constants.INISECT_INS, "nic_count")):
6303 0af0f641 Iustin Pop
      nics = []
6304 0af0f641 Iustin Pop
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
6305 0af0f641 Iustin Pop
        ndict = {}
6306 0af0f641 Iustin Pop
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
6307 0af0f641 Iustin Pop
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
6308 0af0f641 Iustin Pop
          ndict[name] = v
6309 0af0f641 Iustin Pop
        nics.append(ndict)
6310 0af0f641 Iustin Pop
      self.op.nics = nics
6311 0af0f641 Iustin Pop
6312 9f88b0e8 Iustin Pop
    if (self.op.hypervisor is None and
6313 9f88b0e8 Iustin Pop
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
6314 9f88b0e8 Iustin Pop
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
6315 9f88b0e8 Iustin Pop
    if einfo.has_section(constants.INISECT_HYP):
6316 9f88b0e8 Iustin Pop
      # use the export parameters but do not override the ones
6317 9f88b0e8 Iustin Pop
      # specified by the user
6318 9f88b0e8 Iustin Pop
      for name, value in einfo.items(constants.INISECT_HYP):
6319 9f88b0e8 Iustin Pop
        if name not in self.op.hvparams:
6320 9f88b0e8 Iustin Pop
          self.op.hvparams[name] = value
6321 9f88b0e8 Iustin Pop
6322 cc0d88e9 Iustin Pop
    if einfo.has_section(constants.INISECT_BEP):
6323 cc0d88e9 Iustin Pop
      # use the parameters, without overriding
6324 cc0d88e9 Iustin Pop
      for name, value in einfo.items(constants.INISECT_BEP):
6325 cc0d88e9 Iustin Pop
        if name not in self.op.beparams:
6326 cc0d88e9 Iustin Pop
          self.op.beparams[name] = value
6327 cc0d88e9 Iustin Pop
    else:
6328 cc0d88e9 Iustin Pop
      # try to read the parameters old style, from the main section
6329 cc0d88e9 Iustin Pop
      for name in constants.BES_PARAMETERS:
6330 cc0d88e9 Iustin Pop
        if (name not in self.op.beparams and
6331 cc0d88e9 Iustin Pop
            einfo.has_option(constants.INISECT_INS, name)):
6332 cc0d88e9 Iustin Pop
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
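    # Illustrative export fragment as consumed above (sketch; section and
    # option names mirror the constants.INISECT_* lookups above and all
    # values here are hypothetical):
    #   [instance]
    #   disk_template = drbd
    #   disk_count = 1
    #   disk0_size = 1024
    #   nic_count = 1
    #   nic0_mac = aa:00:00:12:34:56
    #   hypervisor = xen-pvm
    #   [hypervisor]
    #   kernel_path = /boot/vmlinuz-2.6-xenU
    #   [backend]
    #   memory = 512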
6333 cc0d88e9 Iustin Pop
6334 e588764d Iustin Pop
  def _RevertToDefaults(self, cluster):
6335 e588764d Iustin Pop
    """Revert the instance parameters to the default values.
6336 e588764d Iustin Pop

6337 e588764d Iustin Pop
    """
6338 e588764d Iustin Pop
    # hvparams
6339 e588764d Iustin Pop
    hv_defs = cluster.GetHVDefaults(self.op.hypervisor, self.op.os_type)
6340 e588764d Iustin Pop
    for name in self.op.hvparams.keys():
6341 e588764d Iustin Pop
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
6342 e588764d Iustin Pop
        del self.op.hvparams[name]
6343 e588764d Iustin Pop
    # beparams
6344 e588764d Iustin Pop
    be_defs = cluster.beparams.get(constants.PP_DEFAULT, {})
6345 e588764d Iustin Pop
    for name in self.op.beparams.keys():
6346 e588764d Iustin Pop
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
6347 e588764d Iustin Pop
        del self.op.beparams[name]
6348 e588764d Iustin Pop
    # nic params
6349 e588764d Iustin Pop
    nic_defs = cluster.nicparams.get(constants.PP_DEFAULT, {})
6350 e588764d Iustin Pop
    for nic in self.op.nics:
6351 e588764d Iustin Pop
      for name in constants.NICS_PARAMETERS:
6352 e588764d Iustin Pop
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
6353 e588764d Iustin Pop
          del nic[name]
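    # Illustrative effect (sketch, hypothetical values): if the cluster-wide
    # default for the "memory" beparam is 128 and the opcode also passed
    # {"memory": 128}, the key is dropped here, so the new instance keeps
    # tracking the cluster default instead of pinning the current value.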
6354 e588764d Iustin Pop
6355 a8083063 Iustin Pop
  def CheckPrereq(self):
6356 a8083063 Iustin Pop
    """Check prerequisites.
6357 a8083063 Iustin Pop

6358 a8083063 Iustin Pop
    """
6359 c1c31426 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6360 c1c31426 Iustin Pop
      export_info = self._ReadExportInfo()
6361 f276c4b5 Iustin Pop
      self._ReadExportParams(export_info)
6362 f276c4b5 Iustin Pop
6363 f276c4b5 Iustin Pop
    _CheckDiskTemplate(self.op.disk_template)
6364 c1c31426 Iustin Pop
6365 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
6366 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
6367 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
6368 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_STATE)
6369 eedc99de Manuel Franceschini
6370 22f50b1d Iustin Pop
    if self.op.hypervisor is None:
6371 22f50b1d Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
6372 22f50b1d Iustin Pop
6373 22f50b1d Iustin Pop
    cluster = self.cfg.GetClusterInfo()
6374 22f50b1d Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
6375 22f50b1d Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
6376 22f50b1d Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
6377 22f50b1d Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
6378 22f50b1d Iustin Pop
                                  ",".join(enabled_hvs)),
6379 22f50b1d Iustin Pop
                                 errors.ECODE_STATE)
6380 22f50b1d Iustin Pop
6381 22f50b1d Iustin Pop
    # check hypervisor parameter syntax (locally)
6382 22f50b1d Iustin Pop
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6383 b6cd72b2 Iustin Pop
    filled_hvp = objects.FillDict(cluster.GetHVDefaults(self.op.hypervisor,
6384 b6cd72b2 Iustin Pop
                                                        self.op.os_type),
6385 22f50b1d Iustin Pop
                                  self.op.hvparams)
6386 22f50b1d Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
6387 22f50b1d Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
6388 22f50b1d Iustin Pop
    self.hv_full = filled_hvp
6389 22f50b1d Iustin Pop
    # check that we don't specify global parameters on an instance
6390 22f50b1d Iustin Pop
    _CheckGlobalHvParams(self.op.hvparams)
6391 22f50b1d Iustin Pop
6392 22f50b1d Iustin Pop
    # fill and remember the beparams dict
6393 22f50b1d Iustin Pop
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6394 22f50b1d Iustin Pop
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
6395 22f50b1d Iustin Pop
                                    self.op.beparams)
6396 22f50b1d Iustin Pop
6397 e588764d Iustin Pop
    # now that hvp/bep are in final format, let's reset to defaults,
6398 e588764d Iustin Pop
    # if told to do so
6399 e588764d Iustin Pop
    if self.op.identify_defaults:
6400 e588764d Iustin Pop
      self._RevertToDefaults(cluster)
6401 e588764d Iustin Pop
6402 22f50b1d Iustin Pop
    # NIC buildup
6403 22f50b1d Iustin Pop
    self.nics = []
6404 22f50b1d Iustin Pop
    for idx, nic in enumerate(self.op.nics):
6405 22f50b1d Iustin Pop
      nic_mode_req = nic.get("mode", None)
6406 22f50b1d Iustin Pop
      nic_mode = nic_mode_req
6407 22f50b1d Iustin Pop
      if nic_mode is None:
6408 22f50b1d Iustin Pop
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
6409 22f50b1d Iustin Pop
6410 22f50b1d Iustin Pop
      # in routed mode, for the first nic, the default ip is 'auto'
6411 22f50b1d Iustin Pop
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
6412 22f50b1d Iustin Pop
        default_ip_mode = constants.VALUE_AUTO
6413 22f50b1d Iustin Pop
      else:
6414 22f50b1d Iustin Pop
        default_ip_mode = constants.VALUE_NONE
6415 22f50b1d Iustin Pop
6416 22f50b1d Iustin Pop
      # ip validity checks
6417 22f50b1d Iustin Pop
      ip = nic.get("ip", default_ip_mode)
6418 22f50b1d Iustin Pop
      if ip is None or ip.lower() == constants.VALUE_NONE:
6419 22f50b1d Iustin Pop
        nic_ip = None
6420 22f50b1d Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
6421 22f50b1d Iustin Pop
        if not self.op.name_check:
6422 22f50b1d Iustin Pop
          raise errors.OpPrereqError("IP address set to auto but name checks"
6423 22f50b1d Iustin Pop
                                     " have been skipped. Aborting.",
6424 22f50b1d Iustin Pop
                                     errors.ECODE_INVAL)
6425 22f50b1d Iustin Pop
        nic_ip = self.hostname1.ip
6426 22f50b1d Iustin Pop
      else:
6427 22f50b1d Iustin Pop
        if not utils.IsValidIP(ip):
6428 22f50b1d Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
6429 22f50b1d Iustin Pop
                                     " like a valid IP" % ip,
6430 22f50b1d Iustin Pop
                                     errors.ECODE_INVAL)
6431 22f50b1d Iustin Pop
        nic_ip = ip
6432 22f50b1d Iustin Pop
6433 22f50b1d Iustin Pop
      # TODO: check the ip address for uniqueness
6434 22f50b1d Iustin Pop
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
6435 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
6436 22f50b1d Iustin Pop
                                   errors.ECODE_INVAL)
6437 22f50b1d Iustin Pop
6438 22f50b1d Iustin Pop
      # MAC address verification
6439 22f50b1d Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
6440 22f50b1d Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6441 22f50b1d Iustin Pop
        mac = utils.NormalizeAndValidateMac(mac)
6442 22f50b1d Iustin Pop
6443 22f50b1d Iustin Pop
        try:
6444 22f50b1d Iustin Pop
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
6445 22f50b1d Iustin Pop
        except errors.ReservationError:
6446 22f50b1d Iustin Pop
          raise errors.OpPrereqError("MAC address %s already in use"
6447 22f50b1d Iustin Pop
                                     " in cluster" % mac,
6448 22f50b1d Iustin Pop
                                     errors.ECODE_NOTUNIQUE)
6449 22f50b1d Iustin Pop
6450 22f50b1d Iustin Pop
      # bridge verification
6451 22f50b1d Iustin Pop
      bridge = nic.get("bridge", None)
6452 22f50b1d Iustin Pop
      link = nic.get("link", None)
6453 22f50b1d Iustin Pop
      if bridge and link:
6454 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
6455 22f50b1d Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
6456 22f50b1d Iustin Pop
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
6457 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
6458 22f50b1d Iustin Pop
                                   errors.ECODE_INVAL)
6459 22f50b1d Iustin Pop
      elif bridge:
6460 22f50b1d Iustin Pop
        link = bridge
6461 22f50b1d Iustin Pop
6462 22f50b1d Iustin Pop
      nicparams = {}
6463 22f50b1d Iustin Pop
      if nic_mode_req:
6464 22f50b1d Iustin Pop
        nicparams[constants.NIC_MODE] = nic_mode_req
6465 22f50b1d Iustin Pop
      if link:
6466 22f50b1d Iustin Pop
        nicparams[constants.NIC_LINK] = link
6467 22f50b1d Iustin Pop
6468 22f50b1d Iustin Pop
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
6469 22f50b1d Iustin Pop
                                      nicparams)
6470 22f50b1d Iustin Pop
      objects.NIC.CheckParameterSyntax(check_params)
6471 22f50b1d Iustin Pop
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
6472 22f50b1d Iustin Pop
6473 22f50b1d Iustin Pop
    # disk checks/pre-build
6474 22f50b1d Iustin Pop
    self.disks = []
6475 22f50b1d Iustin Pop
    for disk in self.op.disks:
6476 22f50b1d Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
6477 22f50b1d Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
6478 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
6479 22f50b1d Iustin Pop
                                   mode, errors.ECODE_INVAL)
6480 22f50b1d Iustin Pop
      size = disk.get("size", None)
6481 22f50b1d Iustin Pop
      if size is None:
6482 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
6483 22f50b1d Iustin Pop
      try:
6484 22f50b1d Iustin Pop
        size = int(size)
6485 22f50b1d Iustin Pop
      except (TypeError, ValueError):
6486 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
6487 22f50b1d Iustin Pop
                                   errors.ECODE_INVAL)
6488 22f50b1d Iustin Pop
      new_disk = {"size": size, "mode": mode}
6489 22f50b1d Iustin Pop
      if "adopt" in disk:
6490 22f50b1d Iustin Pop
        new_disk["adopt"] = disk["adopt"]
6491 22f50b1d Iustin Pop
      self.disks.append(new_disk)
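    # Illustrative sketch (not part of the original code): the loop above
    # normalizes each user-supplied disk spec into a {"size", "mode"[, "adopt"]}
    # dict.  A standalone approximation of that parsing; the helper name and
    # the literal "rw"/"ro" mode values are stand-ins for the real constants:
    def _parse_disk_spec_sketch(spec, valid_modes=("rw", "ro")):
      """Return a normalized disk dict or raise ValueError."""
      mode = spec.get("mode", "rw")
      if mode not in valid_modes:
        raise ValueError("Invalid disk access mode '%s'" % mode)
      if spec.get("size") is None:
        raise ValueError("Missing disk size")
      try:
        size = int(spec["size"])
      except (TypeError, ValueError):
        raise ValueError("Invalid disk size '%s'" % (spec["size"],))
      new_disk = {"size": size, "mode": mode}
      if "adopt" in spec:
        new_disk["adopt"] = spec["adopt"]
      return new_disk
    # e.g. _parse_disk_spec_sketch({"size": "10240"})
    # -> {"size": 10240, "mode": "rw"}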
6492 22f50b1d Iustin Pop
6493 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6494 a8083063 Iustin Pop
6495 09acf207 Guido Trotter
      # Check that the new instance doesn't have less disks than the export
6496 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
6497 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
6498 09acf207 Guido Trotter
      if instance_disks < export_disks:
6499 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
6500 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
6501 5c983ee5 Iustin Pop
                                   (instance_disks, export_disks),
6502 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6503 a8083063 Iustin Pop
6504 09acf207 Guido Trotter
      disk_images = []
6505 09acf207 Guido Trotter
      for idx in range(export_disks):
6506 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
6507 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
6508 09acf207 Guido Trotter
          # FIXME: are the old OSes, disk sizes, etc. useful?
6509 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
6510 c1c31426 Iustin Pop
          image = utils.PathJoin(self.op.src_path, export_name)
6511 09acf207 Guido Trotter
          disk_images.append(image)
6512 09acf207 Guido Trotter
        else:
6513 09acf207 Guido Trotter
          disk_images.append(False)
6514 09acf207 Guido Trotter
6515 09acf207 Guido Trotter
      self.src_images = disk_images
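      # Illustrative sketch (not part of the original code): export_info
      # behaves like a ConfigParser object, so the per-disk image lookup above
      # can be reproduced with a small standalone helper (all names below are
      # made up and the helper is never called here):
      def _list_export_images_sketch(export_cfg, section, src_dir):
        """Return one image path (or False) per disk recorded in the export."""
        import os.path
        images = []
        for i in range(export_cfg.getint(section, "disk_count")):
          opt = "disk%d_dump" % i
          if export_cfg.has_option(section, opt):
            images.append(os.path.join(src_dir, export_cfg.get(section, opt)))
          else:
            images.append(False)
        return images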
6516 901a65c1 Iustin Pop
6517 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
6518 2a518543 Iustin Pop
      try:
6519 2a518543 Iustin Pop
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
6520 2a518543 Iustin Pop
      except (TypeError, ValueError), err:
6521 2a518543 Iustin Pop
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
6522 2a518543 Iustin Pop
                                   " an integer: %s" % str(err),
6523 2a518543 Iustin Pop
                                   errors.ECODE_STATE)
6524 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
6525 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
6526 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count > idx:
6527 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
6528 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
6529 bc89efc3 Guido Trotter
6530 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
6531 901a65c1 Iustin Pop
6532 18c8f361 Iustin Pop
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
6533 901a65c1 Iustin Pop
    if self.op.ip_check:
6534 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
6535 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
6536 5c983ee5 Iustin Pop
                                   (self.check_ip, self.op.instance_name),
6537 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
6538 901a65c1 Iustin Pop
6539 295728df Guido Trotter
    #### mac address generation
6540 295728df Guido Trotter
    # By generating the mac address here, both the allocator and the hooks get
6541 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
6542 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
6543 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
6544 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
6545 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
6546 295728df Guido Trotter
    # creation job will fail.
6547 295728df Guido Trotter
    for nic in self.nics:
6548 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6549 36b66e6e Guido Trotter
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
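    # Illustrative sketch (not part of the original code): the comment above
    # describes a check-then-act window -- the MAC is known to be free when it
    # is generated here, and only the final uniqueness re-check at instance
    # addition time makes that stick.  A toy model, with made-up names and no
    # Ganeti APIs involved:
    def _toy_add_with_mac(used_macs, mac):
      """Simulate the late uniqueness re-check done when the instance is added."""
      if mac in used_macs:
        # the "duplicate mac -> creation job fails" case described above
        raise ValueError("MAC %s already in use" % mac)
      used_macs.add(mac)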
6550 295728df Guido Trotter
6551 538475ca Iustin Pop
    #### allocator run
6552 538475ca Iustin Pop
6553 538475ca Iustin Pop
    if self.op.iallocator is not None:
6554 538475ca Iustin Pop
      self._RunAllocator()
6555 0f1a06e3 Manuel Franceschini
6556 901a65c1 Iustin Pop
    #### node related checks
6557 901a65c1 Iustin Pop
6558 901a65c1 Iustin Pop
    # check primary node
6559 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
6560 7baf741d Guido Trotter
    assert self.pnode is not None, \
6561 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
6562 7527a8a4 Iustin Pop
    if pnode.offline:
6563 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
6564 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
6565 733a2b6a Iustin Pop
    if pnode.drained:
6566 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
6567 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
6568 7527a8a4 Iustin Pop
6569 901a65c1 Iustin Pop
    self.secondaries = []
6570 901a65c1 Iustin Pop
6571 901a65c1 Iustin Pop
    # mirror node verification
6572 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
6573 7baf741d Guido Trotter
      if self.op.snode is None:
6574 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
6575 5c983ee5 Iustin Pop
                                   " a mirror node", errors.ECODE_INVAL)
6576 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
6577 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be the"
6578 5c983ee5 Iustin Pop
                                   " primary node.", errors.ECODE_INVAL)
6579 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
6580 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
6581 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
6582 a8083063 Iustin Pop
6583 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
6584 6785674e Iustin Pop
6585 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
6586 08db7c5c Iustin Pop
                                self.disks)
6587 ed1ebc60 Guido Trotter
6588 c3589cf8 Iustin Pop
    # Check lv size requirements, if not adopting
6589 c3589cf8 Iustin Pop
    if req_size is not None and not self.adopt_disks:
6590 701384a9 Iustin Pop
      _CheckNodesFreeDisk(self, nodenames, req_size)
6591 ed1ebc60 Guido Trotter
6592 c3589cf8 Iustin Pop
    if self.adopt_disks: # instead, we must check the adoption data
6593 c3589cf8 Iustin Pop
      all_lvs = set([i["adopt"] for i in self.disks])
6594 c3589cf8 Iustin Pop
      if len(all_lvs) != len(self.disks):
6595 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
6596 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6597 c3589cf8 Iustin Pop
      for lv_name in all_lvs:
6598 c3589cf8 Iustin Pop
        try:
6599 c3589cf8 Iustin Pop
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
6600 c3589cf8 Iustin Pop
        except errors.ReservationError:
6601 c3589cf8 Iustin Pop
          raise errors.OpPrereqError("LV named %s used by another instance" %
6602 c3589cf8 Iustin Pop
                                     lv_name, errors.ECODE_NOTUNIQUE)
6603 c3589cf8 Iustin Pop
6604 c3589cf8 Iustin Pop
      node_lvs = self.rpc.call_lv_list([pnode.name],
6605 c3589cf8 Iustin Pop
                                       self.cfg.GetVGName())[pnode.name]
6606 c3589cf8 Iustin Pop
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
6607 c3589cf8 Iustin Pop
      node_lvs = node_lvs.payload
6608 c3589cf8 Iustin Pop
      delta = all_lvs.difference(node_lvs.keys())
6609 c3589cf8 Iustin Pop
      if delta:
6610 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
6611 c3589cf8 Iustin Pop
                                   utils.CommaJoin(delta),
6612 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6613 c3589cf8 Iustin Pop
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
6614 c3589cf8 Iustin Pop
      if online_lvs:
6615 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Online logical volumes found, cannot"
6616 c3589cf8 Iustin Pop
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
6617 c3589cf8 Iustin Pop
                                   errors.ECODE_STATE)
6618 c3589cf8 Iustin Pop
      # update the size of disk based on what is found
6619 c3589cf8 Iustin Pop
      for dsk in self.disks:
6620 c3589cf8 Iustin Pop
        dsk["size"] = int(float(node_lvs[dsk["adopt"]][0]))
6621 c3589cf8 Iustin Pop
6622 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
6623 6785674e Iustin Pop
6624 231cd901 Iustin Pop
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
6625 a8083063 Iustin Pop
6626 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
6627 a8083063 Iustin Pop
6628 49ce1563 Iustin Pop
    # memory check on primary node
6629 49ce1563 Iustin Pop
    if self.op.start:
6630 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
6631 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
6632 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
6633 338e51e8 Iustin Pop
                           self.op.hypervisor)
6634 49ce1563 Iustin Pop
6635 08896026 Iustin Pop
    self.dry_run_result = list(nodenames)
6636 08896026 Iustin Pop
6637 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6638 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
6639 a8083063 Iustin Pop

6640 a8083063 Iustin Pop
    """
6641 a8083063 Iustin Pop
    instance = self.op.instance_name
6642 a8083063 Iustin Pop
    pnode_name = self.pnode.name
6643 a8083063 Iustin Pop
6644 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
6645 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
6646 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
6647 2a6469d5 Alexander Schreiber
    else:
6648 2a6469d5 Alexander Schreiber
      network_port = None
6649 58acb49d Alexander Schreiber
6650 0e3baaf3 Iustin Pop
    if constants.ENABLE_FILE_STORAGE:
6651 0e3baaf3 Iustin Pop
      # this is needed because os.path.join does not accept None arguments
6652 0e3baaf3 Iustin Pop
      if self.op.file_storage_dir is None:
6653 0e3baaf3 Iustin Pop
        string_file_storage_dir = ""
6654 0e3baaf3 Iustin Pop
      else:
6655 0e3baaf3 Iustin Pop
        string_file_storage_dir = self.op.file_storage_dir
6656 31a853d2 Iustin Pop
6657 0e3baaf3 Iustin Pop
      # build the full file storage dir path
6658 0e3baaf3 Iustin Pop
      file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
6659 0e3baaf3 Iustin Pop
                                        string_file_storage_dir, instance)
6660 2c313123 Manuel Franceschini
    else:
6661 0e3baaf3 Iustin Pop
      file_storage_dir = ""
6662 0f1a06e3 Manuel Franceschini
6663 0f1a06e3 Manuel Franceschini
6664 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
6665 a8083063 Iustin Pop
                                  self.op.disk_template,
6666 a8083063 Iustin Pop
                                  instance, pnode_name,
6667 08db7c5c Iustin Pop
                                  self.secondaries,
6668 08db7c5c Iustin Pop
                                  self.disks,
6669 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
6670 e2a65344 Iustin Pop
                                  self.op.file_driver,
6671 e2a65344 Iustin Pop
                                  0)
6672 a8083063 Iustin Pop
6673 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
6674 a8083063 Iustin Pop
                            primary_node=pnode_name,
6675 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
6676 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
6677 4978db17 Iustin Pop
                            admin_up=False,
6678 58acb49d Alexander Schreiber
                            network_port=network_port,
6679 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
6680 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
6681 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
6682 a8083063 Iustin Pop
                            )
6683 a8083063 Iustin Pop
6684 c3589cf8 Iustin Pop
    if self.adopt_disks:
6685 c3589cf8 Iustin Pop
      # rename LVs to the newly-generated names; we need to construct
6686 c3589cf8 Iustin Pop
      # 'fake' LV disks with the old data, plus the new unique_id
6687 c3589cf8 Iustin Pop
      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
6688 c3589cf8 Iustin Pop
      rename_to = []
6689 c3589cf8 Iustin Pop
      for t_dsk, a_dsk in zip(tmp_disks, self.disks):
6690 c3589cf8 Iustin Pop
        rename_to.append(t_dsk.logical_id)
6691 c3589cf8 Iustin Pop
        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
6692 c3589cf8 Iustin Pop
        self.cfg.SetDiskID(t_dsk, pnode_name)
6693 c3589cf8 Iustin Pop
      result = self.rpc.call_blockdev_rename(pnode_name,
6694 c3589cf8 Iustin Pop
                                             zip(tmp_disks, rename_to))
6695 c3589cf8 Iustin Pop
      result.Raise("Failed to rename adoped LVs")
6696 c3589cf8 Iustin Pop
    else:
6697 c3589cf8 Iustin Pop
      feedback_fn("* creating instance disks...")
6698 796cab27 Iustin Pop
      try:
6699 c3589cf8 Iustin Pop
        _CreateDisks(self, iobj)
6700 c3589cf8 Iustin Pop
      except errors.OpExecError:
6701 c3589cf8 Iustin Pop
        self.LogWarning("Device creation failed, reverting...")
6702 c3589cf8 Iustin Pop
        try:
6703 c3589cf8 Iustin Pop
          _RemoveDisks(self, iobj)
6704 c3589cf8 Iustin Pop
        finally:
6705 c3589cf8 Iustin Pop
          self.cfg.ReleaseDRBDMinors(instance)
6706 c3589cf8 Iustin Pop
          raise
6707 a8083063 Iustin Pop
6708 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
6709 a8083063 Iustin Pop
6710 0debfb35 Guido Trotter
    self.cfg.AddInstance(iobj, self.proc.GetECId())
6711 0debfb35 Guido Trotter
6712 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
6713 7baf741d Guido Trotter
    # added the instance to the config
6714 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
6715 e36e96b4 Guido Trotter
    # Unlock all the nodes
6716 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
6717 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
6718 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
6719 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
6720 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
6721 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
6722 9c8971d7 Guido Trotter
    else:
6723 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
6724 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
6725 a8083063 Iustin Pop
6726 a8083063 Iustin Pop
    if self.op.wait_for_sync:
6727 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
6728 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
6729 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
6730 a8083063 Iustin Pop
      time.sleep(15)
6731 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
6732 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
6733 a8083063 Iustin Pop
    else:
6734 a8083063 Iustin Pop
      disk_abort = False
6735 a8083063 Iustin Pop
6736 a8083063 Iustin Pop
    if disk_abort:
6737 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
6738 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
6739 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
6740 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
6741 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
6742 3ecf6786 Iustin Pop
                               " this instance")
6743 a8083063 Iustin Pop
6744 c3589cf8 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
6745 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
6746 25a8792c Iustin Pop
        if not self.op.no_install:
6747 25a8792c Iustin Pop
          feedback_fn("* running the instance OS create scripts...")
6748 25a8792c Iustin Pop
          # FIXME: pass debug option from opcode to backend
6749 25a8792c Iustin Pop
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
6750 25a8792c Iustin Pop
                                                 self.op.debug_level)
6751 25a8792c Iustin Pop
          result.Raise("Could not add os for instance %s"
6752 25a8792c Iustin Pop
                       " on node %s" % (instance, pnode_name))
6753 a8083063 Iustin Pop
6754 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
6755 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
6756 a8083063 Iustin Pop
        src_node = self.op.src_node
6757 09acf207 Guido Trotter
        src_images = self.src_images
6758 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
6759 4a0e011f Iustin Pop
        # FIXME: pass debug option from opcode to backend
6760 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
6761 09acf207 Guido Trotter
                                                         src_node, src_images,
6762 dd713605 Iustin Pop
                                                         cluster_name,
6763 dd713605 Iustin Pop
                                                         self.op.debug_level)
6764 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
6765 944bf548 Iustin Pop
        if msg:
6766 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
6767 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
6768 a8083063 Iustin Pop
      else:
6769 a8083063 Iustin Pop
        # also checked in the prereq part
6770 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
6771 3ecf6786 Iustin Pop
                                     % self.op.mode)
6772 a8083063 Iustin Pop
6773 a8083063 Iustin Pop
    if self.op.start:
6774 4978db17 Iustin Pop
      iobj.admin_up = True
6775 a4eae71f Michael Hanselmann
      self.cfg.Update(iobj, feedback_fn)
6776 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
6777 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
6778 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
6779 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
6780 a8083063 Iustin Pop
6781 08896026 Iustin Pop
    return list(iobj.all_nodes)
6782 08896026 Iustin Pop
6783 a8083063 Iustin Pop
6784 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
6785 a8083063 Iustin Pop
  """Connect to an instance's console.
6786 a8083063 Iustin Pop

6787 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
6788 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
6789 a8083063 Iustin Pop
  console.
6790 a8083063 Iustin Pop

6791 a8083063 Iustin Pop
  """
6792 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
6793 8659b73e Guido Trotter
  REQ_BGL = False
6794 8659b73e Guido Trotter
6795 8659b73e Guido Trotter
  def ExpandNames(self):
6796 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
6797 a8083063 Iustin Pop
6798 a8083063 Iustin Pop
  def CheckPrereq(self):
6799 a8083063 Iustin Pop
    """Check prerequisites.
6800 a8083063 Iustin Pop

6801 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
6802 a8083063 Iustin Pop

6803 a8083063 Iustin Pop
    """
6804 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6805 8659b73e Guido Trotter
    assert self.instance is not None, \
6806 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6807 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
6808 a8083063 Iustin Pop
6809 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6810 a8083063 Iustin Pop
    """Connect to the console of an instance
6811 a8083063 Iustin Pop

6812 a8083063 Iustin Pop
    """
6813 a8083063 Iustin Pop
    instance = self.instance
6814 a8083063 Iustin Pop
    node = instance.primary_node
6815 a8083063 Iustin Pop
6816 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
6817 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
6818 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
6819 a8083063 Iustin Pop
6820 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
6821 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
6822 a8083063 Iustin Pop
6823 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
6824 a8083063 Iustin Pop
6825 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
6826 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
6827 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
6828 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
6829 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
6830 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
6831 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
6832 b047857b Michael Hanselmann
6833 82122173 Iustin Pop
    # build ssh cmdline
6834 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
6835 a8083063 Iustin Pop
6836 a8083063 Iustin Pop
6837 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
6838 a8083063 Iustin Pop
  """Replace the disks of an instance.
6839 a8083063 Iustin Pop

6840 a8083063 Iustin Pop
  """
6841 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
6842 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6843 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
6844 efd990e4 Guido Trotter
  REQ_BGL = False
6845 efd990e4 Guido Trotter
6846 7e9366f7 Iustin Pop
  def CheckArguments(self):
6847 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
6848 efd990e4 Guido Trotter
      self.op.remote_node = None
6849 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
6850 7e9366f7 Iustin Pop
      self.op.iallocator = None
6851 7ea7bcf6 Iustin Pop
    if not hasattr(self.op, "early_release"):
6852 7ea7bcf6 Iustin Pop
      self.op.early_release = False
6853 7e9366f7 Iustin Pop
6854 c68174b6 Michael Hanselmann
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
6855 c68174b6 Michael Hanselmann
                                  self.op.iallocator)
6856 7e9366f7 Iustin Pop
6857 7e9366f7 Iustin Pop
  def ExpandNames(self):
6858 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
6859 7e9366f7 Iustin Pop
6860 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
6861 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6862 2bb5c911 Michael Hanselmann
6863 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
6864 cf26a87a Iustin Pop
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6865 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
6866 2bb5c911 Michael Hanselmann
6867 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
6868 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
6869 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
6870 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
6871 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6872 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6873 2bb5c911 Michael Hanselmann
6874 efd990e4 Guido Trotter
    else:
6875 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
6876 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6877 efd990e4 Guido Trotter
6878 c68174b6 Michael Hanselmann
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
6879 c68174b6 Michael Hanselmann
                                   self.op.iallocator, self.op.remote_node,
6880 7ea7bcf6 Iustin Pop
                                   self.op.disks, False, self.op.early_release)
6881 c68174b6 Michael Hanselmann
6882 3a012b41 Michael Hanselmann
    self.tasklets = [self.replacer]
6883 2bb5c911 Michael Hanselmann
6884 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
6885 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
6886 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
6887 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
6888 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6889 efd990e4 Guido Trotter
      self._LockInstancesNodes()
6890 a8083063 Iustin Pop
6891 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6892 a8083063 Iustin Pop
    """Build hooks env.
6893 a8083063 Iustin Pop

6894 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
6895 a8083063 Iustin Pop

6896 a8083063 Iustin Pop
    """
6897 2bb5c911 Michael Hanselmann
    instance = self.replacer.instance
6898 a8083063 Iustin Pop
    env = {
6899 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
6900 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
6901 2bb5c911 Michael Hanselmann
      "OLD_SECONDARY": instance.secondary_nodes[0],
6902 a8083063 Iustin Pop
      }
6903 2bb5c911 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self, instance))
6904 0834c866 Iustin Pop
    nl = [
6905 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
6906 2bb5c911 Michael Hanselmann
      instance.primary_node,
6907 0834c866 Iustin Pop
      ]
6908 0834c866 Iustin Pop
    if self.op.remote_node is not None:
6909 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
6910 a8083063 Iustin Pop
    return env, nl, nl
6911 a8083063 Iustin Pop
6912 2bb5c911 Michael Hanselmann
6913 7ffc5a86 Michael Hanselmann
class LUEvacuateNode(LogicalUnit):
6914 7ffc5a86 Michael Hanselmann
  """Relocate the secondary instances from a node.
6915 7ffc5a86 Michael Hanselmann

6916 7ffc5a86 Michael Hanselmann
  """
6917 7ffc5a86 Michael Hanselmann
  HPATH = "node-evacuate"
6918 7ffc5a86 Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
6919 7ffc5a86 Michael Hanselmann
  _OP_REQP = ["node_name"]
6920 7ffc5a86 Michael Hanselmann
  REQ_BGL = False
6921 7ffc5a86 Michael Hanselmann
6922 7ffc5a86 Michael Hanselmann
  def CheckArguments(self):
6923 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "remote_node"):
6924 7ffc5a86 Michael Hanselmann
      self.op.remote_node = None
6925 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "iallocator"):
6926 7ffc5a86 Michael Hanselmann
      self.op.iallocator = None
6927 7ea7bcf6 Iustin Pop
    if not hasattr(self.op, "early_release"):
6928 7ea7bcf6 Iustin Pop
      self.op.early_release = False
6929 7ffc5a86 Michael Hanselmann
6930 7ffc5a86 Michael Hanselmann
    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
6931 7ffc5a86 Michael Hanselmann
                                  self.op.remote_node,
6932 7ffc5a86 Michael Hanselmann
                                  self.op.iallocator)
6933 7ffc5a86 Michael Hanselmann
6934 7ffc5a86 Michael Hanselmann
  def ExpandNames(self):
6935 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6936 7ffc5a86 Michael Hanselmann
6937 7ffc5a86 Michael Hanselmann
    self.needed_locks = {}
6938 7ffc5a86 Michael Hanselmann
6939 7ffc5a86 Michael Hanselmann
    # Declare node locks
6940 7ffc5a86 Michael Hanselmann
    if self.op.iallocator is not None:
6941 7ffc5a86 Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6942 7ffc5a86 Michael Hanselmann
6943 7ffc5a86 Michael Hanselmann
    elif self.op.remote_node is not None:
6944 cf26a87a Iustin Pop
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6945 7ffc5a86 Michael Hanselmann
6946 7ffc5a86 Michael Hanselmann
      # Warning: do not remove the locking of the new secondary here
6947 7ffc5a86 Michael Hanselmann
      # unless DRBD8.AddChildren is changed to work in parallel;
6948 7ffc5a86 Michael Hanselmann
      # currently it doesn't since parallel invocations of
6949 7ffc5a86 Michael Hanselmann
      # FindUnusedMinor will conflict
6950 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
6951 7ffc5a86 Michael Hanselmann
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6952 7ffc5a86 Michael Hanselmann
6953 7ffc5a86 Michael Hanselmann
    else:
6954 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid parameters", errors.ECODE_INVAL)
6955 7ffc5a86 Michael Hanselmann
6956 7ffc5a86 Michael Hanselmann
    # Create tasklets for replacing disks for all secondary instances on this
6957 7ffc5a86 Michael Hanselmann
    # node
6958 7ffc5a86 Michael Hanselmann
    names = []
6959 3a012b41 Michael Hanselmann
    tasklets = []
6960 7ffc5a86 Michael Hanselmann
6961 7ffc5a86 Michael Hanselmann
    for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
6962 7ffc5a86 Michael Hanselmann
      logging.debug("Replacing disks for instance %s", inst.name)
6963 7ffc5a86 Michael Hanselmann
      names.append(inst.name)
6964 7ffc5a86 Michael Hanselmann
6965 7ffc5a86 Michael Hanselmann
      replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
6966 94a1b377 Michael Hanselmann
                                self.op.iallocator, self.op.remote_node, [],
6967 7ea7bcf6 Iustin Pop
                                True, self.op.early_release)
6968 3a012b41 Michael Hanselmann
      tasklets.append(replacer)
6969 7ffc5a86 Michael Hanselmann
6970 3a012b41 Michael Hanselmann
    self.tasklets = tasklets
6971 7ffc5a86 Michael Hanselmann
    self.instance_names = names
6972 7ffc5a86 Michael Hanselmann
6973 7ffc5a86 Michael Hanselmann
    # Declare instance locks
6974 7ffc5a86 Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names
6975 7ffc5a86 Michael Hanselmann
6976 7ffc5a86 Michael Hanselmann
  def DeclareLocks(self, level):
6977 7ffc5a86 Michael Hanselmann
    # If we're not already locking all nodes in the set we have to declare the
6978 7ffc5a86 Michael Hanselmann
    # instance's primary/secondary nodes.
6979 7ffc5a86 Michael Hanselmann
    if (level == locking.LEVEL_NODE and
6980 7ffc5a86 Michael Hanselmann
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6981 7ffc5a86 Michael Hanselmann
      self._LockInstancesNodes()
6982 7ffc5a86 Michael Hanselmann
6983 7ffc5a86 Michael Hanselmann
  def BuildHooksEnv(self):
6984 7ffc5a86 Michael Hanselmann
    """Build hooks env.
6985 7ffc5a86 Michael Hanselmann

6986 7ffc5a86 Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
6987 7ffc5a86 Michael Hanselmann

6988 7ffc5a86 Michael Hanselmann
    """
6989 7ffc5a86 Michael Hanselmann
    env = {
6990 7ffc5a86 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
6991 7ffc5a86 Michael Hanselmann
      }
6992 7ffc5a86 Michael Hanselmann
6993 7ffc5a86 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
6994 7ffc5a86 Michael Hanselmann
6995 7ffc5a86 Michael Hanselmann
    if self.op.remote_node is not None:
6996 7ffc5a86 Michael Hanselmann
      env["NEW_SECONDARY"] = self.op.remote_node
6997 7ffc5a86 Michael Hanselmann
      nl.append(self.op.remote_node)
6998 7ffc5a86 Michael Hanselmann
6999 7ffc5a86 Michael Hanselmann
    return (env, nl, nl)
7000 7ffc5a86 Michael Hanselmann
7001 7ffc5a86 Michael Hanselmann
7002 c68174b6 Michael Hanselmann
class TLReplaceDisks(Tasklet):
7003 2bb5c911 Michael Hanselmann
  """Replaces disks for an instance.
7004 2bb5c911 Michael Hanselmann

7005 2bb5c911 Michael Hanselmann
  Note: Locking is not within the scope of this class.
7006 2bb5c911 Michael Hanselmann

7007 2bb5c911 Michael Hanselmann
  """
7008 2bb5c911 Michael Hanselmann
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
7009 7ea7bcf6 Iustin Pop
               disks, delay_iallocator, early_release):
7010 2bb5c911 Michael Hanselmann
    """Initializes this class.
7011 2bb5c911 Michael Hanselmann

7012 2bb5c911 Michael Hanselmann
    """
7013 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
7014 464243a7 Michael Hanselmann
7015 2bb5c911 Michael Hanselmann
    # Parameters
7016 2bb5c911 Michael Hanselmann
    self.instance_name = instance_name
7017 2bb5c911 Michael Hanselmann
    self.mode = mode
7018 2bb5c911 Michael Hanselmann
    self.iallocator_name = iallocator_name
7019 2bb5c911 Michael Hanselmann
    self.remote_node = remote_node
7020 2bb5c911 Michael Hanselmann
    self.disks = disks
7021 94a1b377 Michael Hanselmann
    self.delay_iallocator = delay_iallocator
7022 7ea7bcf6 Iustin Pop
    self.early_release = early_release
7023 2bb5c911 Michael Hanselmann
7024 2bb5c911 Michael Hanselmann
    # Runtime data
7025 2bb5c911 Michael Hanselmann
    self.instance = None
7026 2bb5c911 Michael Hanselmann
    self.new_node = None
7027 2bb5c911 Michael Hanselmann
    self.target_node = None
7028 2bb5c911 Michael Hanselmann
    self.other_node = None
7029 2bb5c911 Michael Hanselmann
    self.remote_node_info = None
7030 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = None
7031 2bb5c911 Michael Hanselmann
7032 2bb5c911 Michael Hanselmann
  @staticmethod
7033 2bb5c911 Michael Hanselmann
  def CheckArguments(mode, remote_node, iallocator):
7034 c68174b6 Michael Hanselmann
    """Helper function for users of this class.
7035 c68174b6 Michael Hanselmann

7036 c68174b6 Michael Hanselmann
    """
7037 2bb5c911 Michael Hanselmann
    # check for valid parameter combination
7038 2bb5c911 Michael Hanselmann
    if mode == constants.REPLACE_DISK_CHG:
7039 02a00186 Michael Hanselmann
      if remote_node is None and iallocator is None:
7040 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("When changing the secondary either an"
7041 2bb5c911 Michael Hanselmann
                                   " iallocator script must be used or the"
7042 5c983ee5 Iustin Pop
                                   " new node given", errors.ECODE_INVAL)
7043 02a00186 Michael Hanselmann
7044 02a00186 Michael Hanselmann
      if remote_node is not None and iallocator is not None:
7045 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("Give either the iallocator or the new"
7046 5c983ee5 Iustin Pop
                                   " secondary, not both", errors.ECODE_INVAL)
7047 02a00186 Michael Hanselmann
7048 02a00186 Michael Hanselmann
    elif remote_node is not None or iallocator is not None:
7049 02a00186 Michael Hanselmann
      # Not replacing the secondary
7050 02a00186 Michael Hanselmann
      raise errors.OpPrereqError("The iallocator and new node options can"
7051 02a00186 Michael Hanselmann
                                 " only be used when changing the"
7052 5c983ee5 Iustin Pop
                                 " secondary node", errors.ECODE_INVAL)
7053 2bb5c911 Michael Hanselmann
7054 2bb5c911 Michael Hanselmann
  @staticmethod
7055 2bb5c911 Michael Hanselmann
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
7056 2bb5c911 Michael Hanselmann
    """Compute a new secondary node using an IAllocator.
7057 2bb5c911 Michael Hanselmann

7058 2bb5c911 Michael Hanselmann
    """
7059 2bb5c911 Michael Hanselmann
    ial = IAllocator(lu.cfg, lu.rpc,
7060 2bb5c911 Michael Hanselmann
                     mode=constants.IALLOCATOR_MODE_RELOC,
7061 2bb5c911 Michael Hanselmann
                     name=instance_name,
7062 2bb5c911 Michael Hanselmann
                     relocate_from=relocate_from)
7063 2bb5c911 Michael Hanselmann
7064 2bb5c911 Michael Hanselmann
    ial.Run(iallocator_name)
7065 2bb5c911 Michael Hanselmann
7066 2bb5c911 Michael Hanselmann
    if not ial.success:
7067 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
7068 5c983ee5 Iustin Pop
                                 " %s" % (iallocator_name, ial.info),
7069 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
7070 2bb5c911 Michael Hanselmann
7071 680f0a89 Iustin Pop
    if len(ial.result) != ial.required_nodes:
7072 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7073 2bb5c911 Michael Hanselmann
                                 " of nodes (%s), required %s" %
7074 d984846d Iustin Pop
                                 (iallocator_name,
7075 680f0a89 Iustin Pop
                                  len(ial.result), ial.required_nodes),
7076 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
7077 2bb5c911 Michael Hanselmann
7078 680f0a89 Iustin Pop
    remote_node_name = ial.result[0]
7079 2bb5c911 Michael Hanselmann
7080 2bb5c911 Michael Hanselmann
    lu.LogInfo("Selected new secondary for instance '%s': %s",
7081 2bb5c911 Michael Hanselmann
               instance_name, remote_node_name)
7082 2bb5c911 Michael Hanselmann
7083 2bb5c911 Michael Hanselmann
    return remote_node_name
7084 2bb5c911 Michael Hanselmann
7085 942be002 Michael Hanselmann
  def _FindFaultyDisks(self, node_name):
7086 2d9005d8 Michael Hanselmann
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
7087 2d9005d8 Michael Hanselmann
                                    node_name, True)
7088 942be002 Michael Hanselmann
7089 2bb5c911 Michael Hanselmann
  def CheckPrereq(self):
7090 2bb5c911 Michael Hanselmann
    """Check prerequisites.
7091 2bb5c911 Michael Hanselmann

7092 2bb5c911 Michael Hanselmann
    This checks that the instance is in the cluster.
7093 2bb5c911 Michael Hanselmann

7094 2bb5c911 Michael Hanselmann
    """
7095 e9022531 Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
7096 e9022531 Iustin Pop
    assert instance is not None, \
7097 20eca47d Iustin Pop
      "Cannot retrieve locked instance %s" % self.instance_name
7098 2bb5c911 Michael Hanselmann
7099 e9022531 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
7100 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
7101 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_INVAL)
7102 a8083063 Iustin Pop
7103 e9022531 Iustin Pop
    if len(instance.secondary_nodes) != 1:
7104 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
7105 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
7106 5c983ee5 Iustin Pop
                                 len(instance.secondary_nodes),
7107 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
7108 a8083063 Iustin Pop
7109 94a1b377 Michael Hanselmann
    if not self.delay_iallocator:
7110 94a1b377 Michael Hanselmann
      self._CheckPrereq2()
7111 94a1b377 Michael Hanselmann
7112 94a1b377 Michael Hanselmann
  def _CheckPrereq2(self):
7113 94a1b377 Michael Hanselmann
    """Check prerequisites, second part.
7114 94a1b377 Michael Hanselmann

7115 94a1b377 Michael Hanselmann
    This function should always be part of CheckPrereq. It was separated and is
7116 94a1b377 Michael Hanselmann
    now called from Exec because, during node evacuation, the iallocator was only
7117 94a1b377 Michael Hanselmann
    called with an unmodified cluster model, not taking planned changes into
7118 94a1b377 Michael Hanselmann
    account.
7119 94a1b377 Michael Hanselmann

7120 94a1b377 Michael Hanselmann
    """
7121 94a1b377 Michael Hanselmann
    instance = self.instance
7122 e9022531 Iustin Pop
    secondary_node = instance.secondary_nodes[0]
7123 a9e0c397 Iustin Pop
7124 2bb5c911 Michael Hanselmann
    if self.iallocator_name is None:
7125 2bb5c911 Michael Hanselmann
      remote_node = self.remote_node
7126 2bb5c911 Michael Hanselmann
    else:
7127 2bb5c911 Michael Hanselmann
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
7128 e9022531 Iustin Pop
                                       instance.name, instance.secondary_nodes)
7129 b6e82a65 Iustin Pop
7130 a9e0c397 Iustin Pop
    if remote_node is not None:
7131 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
7132 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
7133 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
7134 a9e0c397 Iustin Pop
    else:
7135 a9e0c397 Iustin Pop
      self.remote_node_info = None
7136 2bb5c911 Michael Hanselmann
7137 2bb5c911 Michael Hanselmann
    if remote_node == self.instance.primary_node:
7138 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
7139 5c983ee5 Iustin Pop
                                 " the instance.", errors.ECODE_INVAL)
7140 2bb5c911 Michael Hanselmann
7141 2bb5c911 Michael Hanselmann
    if remote_node == secondary_node:
7142 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
7143 5c983ee5 Iustin Pop
                                 " secondary node of the instance.",
7144 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7145 7e9366f7 Iustin Pop
7146 2945fd2d Michael Hanselmann
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
7147 2945fd2d Michael Hanselmann
                                    constants.REPLACE_DISK_CHG):
7148 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
7149 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7150 942be002 Michael Hanselmann
7151 2945fd2d Michael Hanselmann
    if self.mode == constants.REPLACE_DISK_AUTO:
7152 e9022531 Iustin Pop
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
7153 942be002 Michael Hanselmann
      faulty_secondary = self._FindFaultyDisks(secondary_node)
7154 942be002 Michael Hanselmann
7155 942be002 Michael Hanselmann
      if faulty_primary and faulty_secondary:
7156 942be002 Michael Hanselmann
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
7157 942be002 Michael Hanselmann
                                   " one node and can not be repaired"
7158 5c983ee5 Iustin Pop
                                   " automatically" % self.instance_name,
7159 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
7160 942be002 Michael Hanselmann
7161 942be002 Michael Hanselmann
      if faulty_primary:
7162 942be002 Michael Hanselmann
        self.disks = faulty_primary
7163 e9022531 Iustin Pop
        self.target_node = instance.primary_node
7164 942be002 Michael Hanselmann
        self.other_node = secondary_node
7165 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7166 942be002 Michael Hanselmann
      elif faulty_secondary:
7167 942be002 Michael Hanselmann
        self.disks = faulty_secondary
7168 942be002 Michael Hanselmann
        self.target_node = secondary_node
7169 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7170 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7171 942be002 Michael Hanselmann
      else:
7172 942be002 Michael Hanselmann
        self.disks = []
7173 942be002 Michael Hanselmann
        check_nodes = []
7174 942be002 Michael Hanselmann
7175 942be002 Michael Hanselmann
    else:
7176 942be002 Michael Hanselmann
      # Non-automatic modes
7177 942be002 Michael Hanselmann
      if self.mode == constants.REPLACE_DISK_PRI:
7178 e9022531 Iustin Pop
        self.target_node = instance.primary_node
7179 942be002 Michael Hanselmann
        self.other_node = secondary_node
7180 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7181 7e9366f7 Iustin Pop
7182 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_SEC:
7183 942be002 Michael Hanselmann
        self.target_node = secondary_node
7184 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7185 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7186 a9e0c397 Iustin Pop
7187 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_CHG:
7188 942be002 Michael Hanselmann
        self.new_node = remote_node
7189 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7190 942be002 Michael Hanselmann
        self.target_node = secondary_node
7191 942be002 Michael Hanselmann
        check_nodes = [self.new_node, self.other_node]
7192 54155f52 Iustin Pop
7193 942be002 Michael Hanselmann
        _CheckNodeNotDrained(self.lu, remote_node)
7194 a8083063 Iustin Pop
7195 9af0fa6a Iustin Pop
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
7196 9af0fa6a Iustin Pop
        assert old_node_info is not None
7197 9af0fa6a Iustin Pop
        if old_node_info.offline and not self.early_release:
7198 9af0fa6a Iustin Pop
          # doesn't make sense to delay the release
7199 9af0fa6a Iustin Pop
          self.early_release = True
7200 9af0fa6a Iustin Pop
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
7201 9af0fa6a Iustin Pop
                          " early-release mode", secondary_node)
7202 9af0fa6a Iustin Pop
7203 942be002 Michael Hanselmann
      else:
7204 942be002 Michael Hanselmann
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
7205 942be002 Michael Hanselmann
                                     self.mode)
7206 942be002 Michael Hanselmann
7207 942be002 Michael Hanselmann
      # If not specified all disks should be replaced
7208 942be002 Michael Hanselmann
      if not self.disks:
7209 942be002 Michael Hanselmann
        self.disks = range(len(self.instance.disks))
7210 a9e0c397 Iustin Pop
7211 2bb5c911 Michael Hanselmann
    for node in check_nodes:
7212 2bb5c911 Michael Hanselmann
      _CheckNodeOnline(self.lu, node)
7213 e4376078 Iustin Pop
7214 2bb5c911 Michael Hanselmann
    # Check whether disks are valid
7215 2bb5c911 Michael Hanselmann
    for disk_idx in self.disks:
7216 e9022531 Iustin Pop
      instance.FindDisk(disk_idx)
7217 e4376078 Iustin Pop
7218 2bb5c911 Michael Hanselmann
    # Get secondary node IP addresses
7219 2bb5c911 Michael Hanselmann
    node_2nd_ip = {}
7220 e4376078 Iustin Pop
7221 2bb5c911 Michael Hanselmann
    for node_name in [self.target_node, self.other_node, self.new_node]:
7222 2bb5c911 Michael Hanselmann
      if node_name is not None:
7223 2bb5c911 Michael Hanselmann
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
7224 e4376078 Iustin Pop
7225 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = node_2nd_ip
7226 a9e0c397 Iustin Pop
7227 c68174b6 Michael Hanselmann
  def Exec(self, feedback_fn):
7228 2bb5c911 Michael Hanselmann
    """Execute disk replacement.
7229 2bb5c911 Michael Hanselmann

7230 2bb5c911 Michael Hanselmann
    This dispatches the disk replacement to the appropriate handler.
7231 cff90b79 Iustin Pop

7232 a9e0c397 Iustin Pop
    """
7233 94a1b377 Michael Hanselmann
    if self.delay_iallocator:
7234 94a1b377 Michael Hanselmann
      self._CheckPrereq2()
7235 94a1b377 Michael Hanselmann
7236 942be002 Michael Hanselmann
    if not self.disks:
7237 942be002 Michael Hanselmann
      feedback_fn("No disks need replacement")
7238 942be002 Michael Hanselmann
      return
7239 942be002 Michael Hanselmann
7240 942be002 Michael Hanselmann
    feedback_fn("Replacing disk(s) %s for %s" %
7241 1f864b60 Iustin Pop
                (utils.CommaJoin(self.disks), self.instance.name))
7242 7ffc5a86 Michael Hanselmann
7243 2bb5c911 Michael Hanselmann
    activate_disks = (not self.instance.admin_up)
7244 2bb5c911 Michael Hanselmann
7245 2bb5c911 Michael Hanselmann
    # Activate the instance disks if we're replacing them on a down instance
7246 2bb5c911 Michael Hanselmann
    if activate_disks:
7247 2bb5c911 Michael Hanselmann
      _StartInstanceDisks(self.lu, self.instance, True)
7248 2bb5c911 Michael Hanselmann
7249 2bb5c911 Michael Hanselmann
    try:
7250 942be002 Michael Hanselmann
      # Should we replace the secondary node?
7251 942be002 Michael Hanselmann
      if self.new_node is not None:
7252 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8Secondary
7253 2bb5c911 Michael Hanselmann
      else:
7254 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8DiskOnly
7255 a4eae71f Michael Hanselmann
7256 a4eae71f Michael Hanselmann
      return fn(feedback_fn)
7257 2bb5c911 Michael Hanselmann
7258 2bb5c911 Michael Hanselmann
    finally:
7259 5c983ee5 Iustin Pop
      # Deactivate the instance disks if we're replacing them on a
7260 5c983ee5 Iustin Pop
      # down instance
7261 2bb5c911 Michael Hanselmann
      if activate_disks:
7262 2bb5c911 Michael Hanselmann
        _SafeShutdownInstanceDisks(self.lu, self.instance)
7263 2bb5c911 Michael Hanselmann
7264 2bb5c911 Michael Hanselmann
  def _CheckVolumeGroup(self, nodes):
7265 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Checking volume groups")
7266 2bb5c911 Michael Hanselmann
7267 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
7268 cff90b79 Iustin Pop
7269 2bb5c911 Michael Hanselmann
    # Make sure volume group exists on all involved nodes
7270 2bb5c911 Michael Hanselmann
    results = self.rpc.call_vg_list(nodes)
7271 cff90b79 Iustin Pop
    if not results:
7272 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
7273 2bb5c911 Michael Hanselmann
7274 2bb5c911 Michael Hanselmann
    for node in nodes:
7275 781de953 Iustin Pop
      res = results[node]
7276 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
7277 2bb5c911 Michael Hanselmann
      if vgname not in res.payload:
7278 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
7279 2bb5c911 Michael Hanselmann
                                 (vgname, node))
7280 2bb5c911 Michael Hanselmann
7281 2bb5c911 Michael Hanselmann
  def _CheckDisksExistence(self, nodes):
7282 2bb5c911 Michael Hanselmann
    # Check disk existence
7283 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7284 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7285 cff90b79 Iustin Pop
        continue
7286 2bb5c911 Michael Hanselmann
7287 2bb5c911 Michael Hanselmann
      for node in nodes:
7288 2bb5c911 Michael Hanselmann
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
7289 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(dev, node)
7290 2bb5c911 Michael Hanselmann
7291 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
7292 2bb5c911 Michael Hanselmann
7293 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7294 2bb5c911 Michael Hanselmann
        if msg or not result.payload:
7295 2bb5c911 Michael Hanselmann
          if not msg:
7296 2bb5c911 Michael Hanselmann
            msg = "disk not found"
7297 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
7298 23829f6f Iustin Pop
                                   (idx, node, msg))
7299 cff90b79 Iustin Pop
7300 2bb5c911 Michael Hanselmann
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
7301 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7302 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7303 cff90b79 Iustin Pop
        continue
7304 cff90b79 Iustin Pop
7305 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
7306 2bb5c911 Michael Hanselmann
                      (idx, node_name))
7307 2bb5c911 Michael Hanselmann
7308 2bb5c911 Michael Hanselmann
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
7309 2bb5c911 Michael Hanselmann
                                   ldisk=ldisk):
7310 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
7311 2bb5c911 Michael Hanselmann
                                 " replace disks for instance %s" %
7312 2bb5c911 Michael Hanselmann
                                 (node_name, self.instance.name))
7313 2bb5c911 Michael Hanselmann
7314 2bb5c911 Michael Hanselmann
  def _CreateNewStorage(self, node_name):
7315 2bb5c911 Michael Hanselmann
    vgname = self.cfg.GetVGName()
7316 2bb5c911 Michael Hanselmann
    iv_names = {}
7317 2bb5c911 Michael Hanselmann
7318 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7319 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7320 a9e0c397 Iustin Pop
        continue
7321 2bb5c911 Michael Hanselmann
7322 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
7323 2bb5c911 Michael Hanselmann
7324 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
7325 2bb5c911 Michael Hanselmann
7326 2bb5c911 Michael Hanselmann
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
7327 2bb5c911 Michael Hanselmann
      names = _GenerateUniqueNames(self.lu, lv_names)
7328 2bb5c911 Michael Hanselmann
7329 2bb5c911 Michael Hanselmann
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
7330 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
7331 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7332 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
7333 2bb5c911 Michael Hanselmann
7334 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
7335 a9e0c397 Iustin Pop
      old_lvs = dev.children
7336 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
7337 2bb5c911 Michael Hanselmann
7338 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
7339 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
7340 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
7341 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
7342 2bb5c911 Michael Hanselmann
7343 2bb5c911 Michael Hanselmann
    return iv_names
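    # Illustrative note (not part of the original code): for every replaced
    # disk the loop above asks for two fresh LV names -- one for the data
    # volume and one for the small DRBD metadata volume (size 128 above).
    # Schematically, for disk index 0:
    #
    #   [".disk%d_%s" % (0, suffix) for suffix in ("data", "meta")]
    #   # -> ['.disk0_data', '.disk0_meta'], which _GenerateUniqueNames then
    #   # turns into unique LV names inside the configured volume group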
7344 2bb5c911 Michael Hanselmann
7345 2bb5c911 Michael Hanselmann
  def _CheckDevices(self, node_name, iv_names):
7346 1122eb25 Iustin Pop
    for name, (dev, _, _) in iv_names.iteritems():
7347 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
7348 2bb5c911 Michael Hanselmann
7349 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_find(node_name, dev)
7350 2bb5c911 Michael Hanselmann
7351 2bb5c911 Michael Hanselmann
      msg = result.fail_msg
7352 2bb5c911 Michael Hanselmann
      if msg or not result.payload:
7353 2bb5c911 Michael Hanselmann
        if not msg:
7354 2bb5c911 Michael Hanselmann
          msg = "disk not found"
7355 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
7356 2bb5c911 Michael Hanselmann
                                 (name, msg))
7357 2bb5c911 Michael Hanselmann
7358 96acbc09 Michael Hanselmann
      if result.payload.is_degraded:
7359 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
7360 2bb5c911 Michael Hanselmann
7361 2bb5c911 Michael Hanselmann
  def _RemoveOldStorage(self, node_name, iv_names):
7362 1122eb25 Iustin Pop
    for name, (_, old_lvs, _) in iv_names.iteritems():
7363 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Remove logical volumes for %s" % name)
7364 2bb5c911 Michael Hanselmann
7365 2bb5c911 Michael Hanselmann
      for lv in old_lvs:
7366 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(lv, node_name)
7367 2bb5c911 Michael Hanselmann
7368 2bb5c911 Michael Hanselmann
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
7369 2bb5c911 Michael Hanselmann
        if msg:
7370 2bb5c911 Michael Hanselmann
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
7371 2bb5c911 Michael Hanselmann
                             hint="remove unused LVs manually")
7372 2bb5c911 Michael Hanselmann
7373 7ea7bcf6 Iustin Pop
  def _ReleaseNodeLock(self, node_name):
7374 7ea7bcf6 Iustin Pop
    """Releases the lock for a given node."""
7375 7ea7bcf6 Iustin Pop
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
7376 7ea7bcf6 Iustin Pop
7377 a4eae71f Michael Hanselmann
  def _ExecDrbd8DiskOnly(self, feedback_fn):
7378 2bb5c911 Michael Hanselmann
    """Replace a disk on the primary or secondary for DRBD 8.
7379 2bb5c911 Michael Hanselmann

7380 2bb5c911 Michael Hanselmann
    The algorithm for replace is quite complicated:
7381 2bb5c911 Michael Hanselmann

7382 2bb5c911 Michael Hanselmann
      1. for each disk to be replaced:
7383 2bb5c911 Michael Hanselmann

7384 2bb5c911 Michael Hanselmann
        1. create new LVs on the target node with unique names
7385 2bb5c911 Michael Hanselmann
        1. detach old LVs from the drbd device
7386 2bb5c911 Michael Hanselmann
        1. rename old LVs to name_replaced.<time_t>
7387 2bb5c911 Michael Hanselmann
        1. rename new LVs to old LVs
7388 2bb5c911 Michael Hanselmann
        1. attach the new LVs (with the old names now) to the drbd device
7389 2bb5c911 Michael Hanselmann

7390 2bb5c911 Michael Hanselmann
      1. wait for sync across all devices
7391 2bb5c911 Michael Hanselmann

7392 2bb5c911 Michael Hanselmann
      1. for each modified disk:
7393 2bb5c911 Michael Hanselmann

7394 2bb5c911 Michael Hanselmann
        1. remove old LVs (which have the name name_replaced.<time_t>)
7395 2bb5c911 Michael Hanselmann

7396 2bb5c911 Michael Hanselmann
    Failures are not very well handled.
7397 2bb5c911 Michael Hanselmann

7398 2bb5c911 Michael Hanselmann
    """
7399 2bb5c911 Michael Hanselmann
    steps_total = 6
7400 2bb5c911 Michael Hanselmann
7401 2bb5c911 Michael Hanselmann
    # Step: check device activation
7402 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
7403 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.other_node, self.target_node])
7404 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.target_node, self.other_node])
7405 2bb5c911 Michael Hanselmann
7406 2bb5c911 Michael Hanselmann
    # Step: check other node consistency
7407 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7408 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.other_node,
7409 2bb5c911 Michael Hanselmann
                                self.other_node == self.instance.primary_node,
7410 2bb5c911 Michael Hanselmann
                                False)
7411 2bb5c911 Michael Hanselmann
7412 2bb5c911 Michael Hanselmann
    # Step: create new storage
7413 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7414 2bb5c911 Michael Hanselmann
    iv_names = self._CreateNewStorage(self.target_node)
7415 a9e0c397 Iustin Pop
7416 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
7417 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7418 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
7419 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
7420 2bb5c911 Michael Hanselmann
7421 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
7422 4d4a651d Michael Hanselmann
                                                     old_lvs)
7423 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
7424 2bb5c911 Michael Hanselmann
                   " %s for device %s" % (self.target_node, dev.iv_name))
7425 cff90b79 Iustin Pop
      #dev.children = []
7426 cff90b79 Iustin Pop
      #cfg.Update(instance)
7427 a9e0c397 Iustin Pop
7428 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
7429 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
7430 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
7431 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
7432 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
7433 cff90b79 Iustin Pop
7434 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
7435 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
7436 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
7437 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
7438 2bb5c911 Michael Hanselmann
7439 2bb5c911 Michael Hanselmann
      # Build the rename list based on what LVs exist on the node
7440 2bb5c911 Michael Hanselmann
      rename_old_to_new = []
7441 cff90b79 Iustin Pop
      for to_ren in old_lvs:
7442 2bb5c911 Michael Hanselmann
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
7443 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
7444 23829f6f Iustin Pop
          # device exists
7445 2bb5c911 Michael Hanselmann
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
7446 cff90b79 Iustin Pop
7447 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the old LVs on the target node")
7448 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
7449 4d4a651d Michael Hanselmann
                                             rename_old_to_new)
7450 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
7451 2bb5c911 Michael Hanselmann
7452 2bb5c911 Michael Hanselmann
      # Now we rename the new LVs to the old LVs
7453 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the new LVs on the target node")
7454 2bb5c911 Michael Hanselmann
      rename_new_to_old = [(new, old.physical_id)
7455 2bb5c911 Michael Hanselmann
                           for old, new in zip(old_lvs, new_lvs)]
7456 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
7457 4d4a651d Michael Hanselmann
                                             rename_new_to_old)
7458 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
7459 cff90b79 Iustin Pop
7460 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
7461 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
7462 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(new, self.target_node)
7463 a9e0c397 Iustin Pop
7464 cff90b79 Iustin Pop
      for disk in old_lvs:
7465 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
7466 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(disk, self.target_node)
7467 a9e0c397 Iustin Pop
7468 2bb5c911 Michael Hanselmann
      # Now that the new lvs have the old name, we can add them to the device
7469 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
7470 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
7471 4d4a651d Michael Hanselmann
                                                  new_lvs)
7472 4c4e4e1e Iustin Pop
      msg = result.fail_msg
7473 2cc1da8b Iustin Pop
      if msg:
7474 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
7475 4d4a651d Michael Hanselmann
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
7476 4d4a651d Michael Hanselmann
                                               new_lv).fail_msg
7477 4c4e4e1e Iustin Pop
          if msg2:
7478 2bb5c911 Michael Hanselmann
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
7479 2bb5c911 Michael Hanselmann
                               hint=("cleanup manually the unused logical"
7480 2bb5c911 Michael Hanselmann
                                     "volumes"))
7481 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
7482 a9e0c397 Iustin Pop
7483 a9e0c397 Iustin Pop
      dev.children = new_lvs
7484 a9e0c397 Iustin Pop
7485 a4eae71f Michael Hanselmann
      self.cfg.Update(self.instance, feedback_fn)
7486 a9e0c397 Iustin Pop
7487 7ea7bcf6 Iustin Pop
    cstep = 5
7488 7ea7bcf6 Iustin Pop
    if self.early_release:
7489 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7490 7ea7bcf6 Iustin Pop
      cstep += 1
7491 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7492 d5cd389c Iustin Pop
      # WARNING: we release both node locks here, do not do other RPCs
7493 d5cd389c Iustin Pop
      # than WaitForSync to the primary node
7494 d5cd389c Iustin Pop
      self._ReleaseNodeLock([self.target_node, self.other_node])
7495 7ea7bcf6 Iustin Pop
7496 2bb5c911 Michael Hanselmann
    # Wait for sync
7497 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
7498 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
7499 7ea7bcf6 Iustin Pop
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7500 7ea7bcf6 Iustin Pop
    cstep += 1
7501 b6c07b79 Michael Hanselmann
    _WaitForSync(self.lu, self.instance)
7502 a9e0c397 Iustin Pop
7503 2bb5c911 Michael Hanselmann
    # Check all devices manually
7504 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
7505 a9e0c397 Iustin Pop
7506 cff90b79 Iustin Pop
    # Step: remove old storage
7507 7ea7bcf6 Iustin Pop
    if not self.early_release:
7508 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7509 7ea7bcf6 Iustin Pop
      cstep += 1
7510 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7511 a9e0c397 Iustin Pop
7512 a4eae71f Michael Hanselmann
  def _ExecDrbd8Secondary(self, feedback_fn):
7513 2bb5c911 Michael Hanselmann
    """Replace the secondary node for DRBD 8.
7514 a9e0c397 Iustin Pop

7515 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
7516 a9e0c397 Iustin Pop
      - for all disks of the instance:
7517 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
7518 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
7519 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
7520 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
7521 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
7522 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
7523 a9e0c397 Iustin Pop
          finds a device which is connected to the correct local disks but
7524 a9e0c397 Iustin Pop
          not network enabled
7525 a9e0c397 Iustin Pop
      - wait for sync across all devices
7526 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
7527 a9e0c397 Iustin Pop

7528 a9e0c397 Iustin Pop
    Failures are not very well handled.
7529 0834c866 Iustin Pop

7530 a9e0c397 Iustin Pop
    """
7531 0834c866 Iustin Pop
    steps_total = 6
7532 0834c866 Iustin Pop
7533 0834c866 Iustin Pop
    # Step: check device activation
7534 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
7535 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.instance.primary_node])
7536 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.instance.primary_node])
7537 0834c866 Iustin Pop
7538 0834c866 Iustin Pop
    # Step: check other node consistency
7539 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7540 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
7541 0834c866 Iustin Pop
7542 0834c866 Iustin Pop
    # Step: create new storage
7543 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7544 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7545 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
7546 2bb5c911 Michael Hanselmann
                      (self.new_node, idx))
7547 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
7548 a9e0c397 Iustin Pop
      for new_lv in dev.children:
7549 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
7550 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
7551 a9e0c397 Iustin Pop
7552 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
7553 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
7554 a1578d63 Iustin Pop
    # error and the success paths
7555 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7556 4d4a651d Michael Hanselmann
    minors = self.cfg.AllocateDRBDMinor([self.new_node
7557 4d4a651d Michael Hanselmann
                                         for dev in self.instance.disks],
7558 2bb5c911 Michael Hanselmann
                                        self.instance.name)
7559 099c52ad Iustin Pop
    logging.debug("Allocated minors %r", minors)
7560 2bb5c911 Michael Hanselmann
7561 2bb5c911 Michael Hanselmann
    iv_names = {}
7562 2bb5c911 Michael Hanselmann
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
7563 4d4a651d Michael Hanselmann
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
7564 4d4a651d Michael Hanselmann
                      (self.new_node, idx))
7565 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
7566 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
7567 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
7568 a2d59d8b Iustin Pop
      # with network, for the latter activation in step 4
7569 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
7570 2bb5c911 Michael Hanselmann
      if self.instance.primary_node == o_node1:
7571 a2d59d8b Iustin Pop
        p_minor = o_minor1
7572 ffa1c0dc Iustin Pop
      else:
7573 1122eb25 Iustin Pop
        assert self.instance.primary_node == o_node2, "Three-node instance?"
7574 a2d59d8b Iustin Pop
        p_minor = o_minor2
7575 a2d59d8b Iustin Pop
7576 4d4a651d Michael Hanselmann
      new_alone_id = (self.instance.primary_node, self.new_node, None,
7577 4d4a651d Michael Hanselmann
                      p_minor, new_minor, o_secret)
7578 4d4a651d Michael Hanselmann
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
7579 4d4a651d Michael Hanselmann
                    p_minor, new_minor, o_secret)
7580 a2d59d8b Iustin Pop
7581 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
7582 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
7583 a2d59d8b Iustin Pop
                    new_net_id)
7584 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
7585 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
7586 8a6c7011 Iustin Pop
                              children=dev.children,
7587 8a6c7011 Iustin Pop
                              size=dev.size)
7588 796cab27 Iustin Pop
      try:
7589 2bb5c911 Michael Hanselmann
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
7590 2bb5c911 Michael Hanselmann
                              _GetInstanceInfoText(self.instance), False)
7591 82759cb1 Iustin Pop
      except errors.GenericError:
7592 2bb5c911 Michael Hanselmann
        self.cfg.ReleaseDRBDMinors(self.instance.name)
7593 796cab27 Iustin Pop
        raise
7594 a9e0c397 Iustin Pop
7595 2bb5c911 Michael Hanselmann
    # We have new devices, shutdown the drbd on the old secondary
7596 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7597 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
7598 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.target_node)
7599 2bb5c911 Michael Hanselmann
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
7600 cacfd1fd Iustin Pop
      if msg:
7601 2bb5c911 Michael Hanselmann
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
7602 2bb5c911 Michael Hanselmann
                           "node: %s" % (idx, msg),
7603 2bb5c911 Michael Hanselmann
                           hint=("Please cleanup this device manually as"
7604 2bb5c911 Michael Hanselmann
                                 " soon as possible"))
7605 a9e0c397 Iustin Pop
7606 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
7607 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
7608 4d4a651d Michael Hanselmann
                                               self.node_secondary_ip,
7609 4d4a651d Michael Hanselmann
                                               self.instance.disks)\
7610 4d4a651d Michael Hanselmann
                                              [self.instance.primary_node]
7611 642445d9 Iustin Pop
7612 4c4e4e1e Iustin Pop
    msg = result.fail_msg
7613 a2d59d8b Iustin Pop
    if msg:
7614 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
7615 2bb5c911 Michael Hanselmann
      self.cfg.ReleaseDRBDMinors(self.instance.name)
7616 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
7617 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
7618 642445d9 Iustin Pop
7619 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
7620 642445d9 Iustin Pop
    # the instance to point to the new secondary
7621 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Updating instance configuration")
7622 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
7623 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
7624 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.instance.primary_node)
7625 2bb5c911 Michael Hanselmann
7626 a4eae71f Michael Hanselmann
    self.cfg.Update(self.instance, feedback_fn)
7627 a9e0c397 Iustin Pop
7628 642445d9 Iustin Pop
    # and now perform the drbd attach
7629 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Attaching primary drbds to new secondary"
7630 2bb5c911 Michael Hanselmann
                    " (standalone => connected)")
7631 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
7632 4d4a651d Michael Hanselmann
                                            self.new_node],
7633 4d4a651d Michael Hanselmann
                                           self.node_secondary_ip,
7634 4d4a651d Michael Hanselmann
                                           self.instance.disks,
7635 4d4a651d Michael Hanselmann
                                           self.instance.name,
7636 a2d59d8b Iustin Pop
                                           False)
7637 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
7638 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
7639 a2d59d8b Iustin Pop
      if msg:
7640 4d4a651d Michael Hanselmann
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
7641 4d4a651d Michael Hanselmann
                           to_node, msg,
7642 2bb5c911 Michael Hanselmann
                           hint=("please do a gnt-instance info to see the"
7643 2bb5c911 Michael Hanselmann
                                 " status of disks"))
7644 7ea7bcf6 Iustin Pop
    cstep = 5
7645 7ea7bcf6 Iustin Pop
    if self.early_release:
7646 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7647 7ea7bcf6 Iustin Pop
      cstep += 1
7648 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7649 d5cd389c Iustin Pop
      # WARNING: we release all node locks here, do not do other RPCs
7650 d5cd389c Iustin Pop
      # than WaitForSync to the primary node
7651 d5cd389c Iustin Pop
      self._ReleaseNodeLock([self.instance.primary_node,
7652 d5cd389c Iustin Pop
                             self.target_node,
7653 d5cd389c Iustin Pop
                             self.new_node])
7654 a9e0c397 Iustin Pop
7655 2bb5c911 Michael Hanselmann
    # Wait for sync
7656 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
7657 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
7658 7ea7bcf6 Iustin Pop
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7659 7ea7bcf6 Iustin Pop
    cstep += 1
7660 b6c07b79 Michael Hanselmann
    _WaitForSync(self.lu, self.instance)
7661 a9e0c397 Iustin Pop
7662 2bb5c911 Michael Hanselmann
    # Check all devices manually
7663 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
7664 22985314 Guido Trotter
7665 2bb5c911 Michael Hanselmann
    # Step: remove old storage
7666 7ea7bcf6 Iustin Pop
    if not self.early_release:
7667 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7668 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7669 a9e0c397 Iustin Pop
7670 a8083063 Iustin Pop
7671 76aef8fc Michael Hanselmann
class LURepairNodeStorage(NoHooksLU):
7672 76aef8fc Michael Hanselmann
  """Repairs the volume group on a node.
7673 76aef8fc Michael Hanselmann

7674 76aef8fc Michael Hanselmann
  """
7675 76aef8fc Michael Hanselmann
  _OP_REQP = ["node_name"]
7676 76aef8fc Michael Hanselmann
  REQ_BGL = False
7677 76aef8fc Michael Hanselmann
7678 76aef8fc Michael Hanselmann
  def CheckArguments(self):
7679 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
7680 76aef8fc Michael Hanselmann
7681 0e3baaf3 Iustin Pop
    _CheckStorageType(self.op.storage_type)
7682 0e3baaf3 Iustin Pop
7683 76aef8fc Michael Hanselmann
  def ExpandNames(self):
7684 76aef8fc Michael Hanselmann
    self.needed_locks = {
7685 76aef8fc Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
7686 76aef8fc Michael Hanselmann
      }
7687 76aef8fc Michael Hanselmann
7688 76aef8fc Michael Hanselmann
  def _CheckFaultyDisks(self, instance, node_name):
7689 7e9c6a78 Iustin Pop
    """Ensure faulty disks abort the opcode or at least warn."""
7690 7e9c6a78 Iustin Pop
    try:
7691 7e9c6a78 Iustin Pop
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
7692 7e9c6a78 Iustin Pop
                                  node_name, True):
7693 7e9c6a78 Iustin Pop
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
7694 7e9c6a78 Iustin Pop
                                   " node '%s'" % (instance.name, node_name),
7695 7e9c6a78 Iustin Pop
                                   errors.ECODE_STATE)
7696 7e9c6a78 Iustin Pop
    except errors.OpPrereqError, err:
7697 7e9c6a78 Iustin Pop
      if self.op.ignore_consistency:
7698 7e9c6a78 Iustin Pop
        self.proc.LogWarning(str(err.args[0]))
7699 7e9c6a78 Iustin Pop
      else:
7700 7e9c6a78 Iustin Pop
        raise
7701 76aef8fc Michael Hanselmann
7702 76aef8fc Michael Hanselmann
  def CheckPrereq(self):
7703 76aef8fc Michael Hanselmann
    """Check prerequisites.
7704 76aef8fc Michael Hanselmann

7705 76aef8fc Michael Hanselmann
    """
7706 76aef8fc Michael Hanselmann
    storage_type = self.op.storage_type
7707 76aef8fc Michael Hanselmann
7708 76aef8fc Michael Hanselmann
    if (constants.SO_FIX_CONSISTENCY not in
7709 76aef8fc Michael Hanselmann
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
7710 76aef8fc Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
7711 5c983ee5 Iustin Pop
                                 " repaired" % storage_type,
7712 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7713 76aef8fc Michael Hanselmann
7714 76aef8fc Michael Hanselmann
    # Check whether any instance on this node has faulty disks
7715 76aef8fc Michael Hanselmann
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
7716 7e9c6a78 Iustin Pop
      if not inst.admin_up:
7717 7e9c6a78 Iustin Pop
        continue
7718 76aef8fc Michael Hanselmann
      check_nodes = set(inst.all_nodes)
7719 76aef8fc Michael Hanselmann
      check_nodes.discard(self.op.node_name)
7720 76aef8fc Michael Hanselmann
      for inst_node_name in check_nodes:
7721 76aef8fc Michael Hanselmann
        self._CheckFaultyDisks(inst, inst_node_name)
7722 76aef8fc Michael Hanselmann
7723 76aef8fc Michael Hanselmann
  def Exec(self, feedback_fn):
7724 76aef8fc Michael Hanselmann
    feedback_fn("Repairing storage unit '%s' on %s ..." %
7725 76aef8fc Michael Hanselmann
                (self.op.name, self.op.node_name))
7726 76aef8fc Michael Hanselmann
7727 76aef8fc Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
7728 76aef8fc Michael Hanselmann
    result = self.rpc.call_storage_execute(self.op.node_name,
7729 76aef8fc Michael Hanselmann
                                           self.op.storage_type, st_args,
7730 76aef8fc Michael Hanselmann
                                           self.op.name,
7731 76aef8fc Michael Hanselmann
                                           constants.SO_FIX_CONSISTENCY)
7732 76aef8fc Michael Hanselmann
    result.Raise("Failed to repair storage unit '%s' on %s" %
7733 76aef8fc Michael Hanselmann
                 (self.op.name, self.op.node_name))
7734 76aef8fc Michael Hanselmann
7735 76aef8fc Michael Hanselmann
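# Editorial sketch (illustrative only, not used by any LU): the faulty-disk
# gate of LURepairNodeStorage.CheckPrereq restated on plain data, so the
# control flow is easy to follow.  All names below are hypothetical:
# "instances" is a list of dicts describing the admin_up instances on
# node_name, "faulty_map" maps node name -> set of instance names with
# faulty disks on that node.
def _SketchRepairAllowed(node_name, instances, faulty_map, ignore_consistency):
  """Return (allowed, warnings) for a storage repair on node_name."""
  warnings = []
  for inst in instances:
    for check_node in inst["all_nodes"]:
      if check_node == node_name:
        # the node being repaired is not checked itself
        continue
      if inst["name"] in faulty_map.get(check_node, set()):
        msg = ("Instance '%s' has faulty disks on node '%s'" %
               (inst["name"], check_node))
        if ignore_consistency:
          warnings.append(msg)
        else:
          return (False, warnings)
  return (True, warnings)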
7736 f7e7689f Iustin Pop
class LUNodeEvacuationStrategy(NoHooksLU):
7737 f7e7689f Iustin Pop
  """Computes the node evacuation strategy.
7738 f7e7689f Iustin Pop

7739 f7e7689f Iustin Pop
  """
7740 f7e7689f Iustin Pop
  _OP_REQP = ["nodes"]
7741 f7e7689f Iustin Pop
  REQ_BGL = False
7742 f7e7689f Iustin Pop
7743 f7e7689f Iustin Pop
  def CheckArguments(self):
7744 f7e7689f Iustin Pop
    if not hasattr(self.op, "remote_node"):
7745 f7e7689f Iustin Pop
      self.op.remote_node = None
7746 f7e7689f Iustin Pop
    if not hasattr(self.op, "iallocator"):
7747 f7e7689f Iustin Pop
      self.op.iallocator = None
7748 f7e7689f Iustin Pop
    if self.op.remote_node is not None and self.op.iallocator is not None:
7749 f7e7689f Iustin Pop
      raise errors.OpPrereqError("Give either the iallocator or the new"
7750 f7e7689f Iustin Pop
                                 " secondary, not both", errors.ECODE_INVAL)
7751 f7e7689f Iustin Pop
7752 f7e7689f Iustin Pop
  def ExpandNames(self):
7753 f7e7689f Iustin Pop
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
7754 f7e7689f Iustin Pop
    self.needed_locks = locks = {}
7755 f7e7689f Iustin Pop
    if self.op.remote_node is None:
7756 f7e7689f Iustin Pop
      locks[locking.LEVEL_NODE] = locking.ALL_SET
7757 f7e7689f Iustin Pop
    else:
7758 f7e7689f Iustin Pop
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7759 f7e7689f Iustin Pop
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
7760 f7e7689f Iustin Pop
7761 f7e7689f Iustin Pop
  def CheckPrereq(self):
7762 f7e7689f Iustin Pop
    pass
7763 f7e7689f Iustin Pop
7764 f7e7689f Iustin Pop
  def Exec(self, feedback_fn):
7765 f7e7689f Iustin Pop
    if self.op.remote_node is not None:
7766 f7e7689f Iustin Pop
      instances = []
7767 f7e7689f Iustin Pop
      for node in self.op.nodes:
7768 f7e7689f Iustin Pop
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
7769 f7e7689f Iustin Pop
      result = []
7770 f7e7689f Iustin Pop
      for i in instances:
7771 f7e7689f Iustin Pop
        if i.primary_node == self.op.remote_node:
7772 f7e7689f Iustin Pop
          raise errors.OpPrereqError("Node %s is the primary node of"
7773 f7e7689f Iustin Pop
                                     " instance %s, cannot use it as"
7774 f7e7689f Iustin Pop
                                     " secondary" %
7775 f7e7689f Iustin Pop
                                     (self.op.remote_node, i.name),
7776 f7e7689f Iustin Pop
                                     errors.ECODE_INVAL)
7777 f7e7689f Iustin Pop
        result.append([i.name, self.op.remote_node])
7778 f7e7689f Iustin Pop
    else:
7779 f7e7689f Iustin Pop
      ial = IAllocator(self.cfg, self.rpc,
7780 f7e7689f Iustin Pop
                       mode=constants.IALLOCATOR_MODE_MEVAC,
7781 f7e7689f Iustin Pop
                       evac_nodes=self.op.nodes)
7782 f7e7689f Iustin Pop
      ial.Run(self.op.iallocator, validate=True)
7783 f7e7689f Iustin Pop
      if not ial.success:
7784 f7e7689f Iustin Pop
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
7785 f7e7689f Iustin Pop
                                 errors.ECODE_NORES)
7786 f7e7689f Iustin Pop
      result = ial.result
7787 f7e7689f Iustin Pop
    return result
7788 f7e7689f Iustin Pop
7789 f7e7689f Iustin Pop
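# Editorial sketch (illustrative only, not used by any LU): the fixed
# remote-node branch of LUNodeEvacuationStrategy.Exec on plain dicts.  Every
# instance that has one of the evacuated nodes as secondary is paired with
# the requested new secondary, unless that node is already its primary.
def _SketchEvacPairs(evac_instances, remote_node):
  """Return [[instance_name, new_secondary], ...] or raise ValueError."""
  pairs = []
  for inst in evac_instances:
    if inst["primary_node"] == remote_node:
      raise ValueError("Node %s is the primary node of instance %s" %
                       (remote_node, inst["name"]))
    pairs.append([inst["name"], remote_node])
  return pairs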
7790 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
7791 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
7792 8729e0d7 Iustin Pop

7793 8729e0d7 Iustin Pop
  """
7794 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
7795 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7796 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
7797 31e63dbf Guido Trotter
  REQ_BGL = False
7798 31e63dbf Guido Trotter
7799 31e63dbf Guido Trotter
  def ExpandNames(self):
7800 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
7801 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7802 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7803 31e63dbf Guido Trotter
7804 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
7805 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
7806 31e63dbf Guido Trotter
      self._LockInstancesNodes()
7807 8729e0d7 Iustin Pop
7808 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
7809 8729e0d7 Iustin Pop
    """Build hooks env.
7810 8729e0d7 Iustin Pop

7811 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
7812 8729e0d7 Iustin Pop

7813 8729e0d7 Iustin Pop
    """
7814 8729e0d7 Iustin Pop
    env = {
7815 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
7816 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
7817 8729e0d7 Iustin Pop
      }
7818 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7819 abd8e836 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7820 8729e0d7 Iustin Pop
    return env, nl, nl
7821 8729e0d7 Iustin Pop
7822 8729e0d7 Iustin Pop
  def CheckPrereq(self):
7823 8729e0d7 Iustin Pop
    """Check prerequisites.
7824 8729e0d7 Iustin Pop

7825 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
7826 8729e0d7 Iustin Pop

7827 8729e0d7 Iustin Pop
    """
7828 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7829 31e63dbf Guido Trotter
    assert instance is not None, \
7830 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
7831 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
7832 6b12959c Iustin Pop
    for node in nodenames:
7833 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
7834 7527a8a4 Iustin Pop
7835 31e63dbf Guido Trotter
7836 8729e0d7 Iustin Pop
    self.instance = instance
7837 8729e0d7 Iustin Pop
7838 728489a3 Guido Trotter
    if instance.disk_template not in constants.DTS_GROWABLE:
7839 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
7840 5c983ee5 Iustin Pop
                                 " growing.", errors.ECODE_INVAL)
7841 8729e0d7 Iustin Pop
7842 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
7843 8729e0d7 Iustin Pop
7844 2c42c5df Guido Trotter
    if instance.disk_template != constants.DT_FILE:
7845 2c42c5df Guido Trotter
      # TODO: check the free disk space for file, when that feature will be
7846 2c42c5df Guido Trotter
      # supported
7847 2c42c5df Guido Trotter
      _CheckNodesFreeDisk(self, nodenames, self.op.amount)
7848 8729e0d7 Iustin Pop
7849 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
7850 8729e0d7 Iustin Pop
    """Execute disk grow.
7851 8729e0d7 Iustin Pop

7852 8729e0d7 Iustin Pop
    """
7853 8729e0d7 Iustin Pop
    instance = self.instance
7854 ad24e046 Iustin Pop
    disk = self.disk
7855 6b12959c Iustin Pop
    for node in instance.all_nodes:
7856 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
7857 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
7858 4c4e4e1e Iustin Pop
      result.Raise("Grow request failed to node %s" % node)
7859 5bc556dd Michael Hanselmann
7860 5bc556dd Michael Hanselmann
      # TODO: Rewrite code to work properly
7861 5bc556dd Michael Hanselmann
      # DRBD goes into sync mode for a short amount of time after executing the
7862 5bc556dd Michael Hanselmann
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
7863 5bc556dd Michael Hanselmann
      # calling "resize" in sync mode fails. Sleeping for a short amount of
7864 5bc556dd Michael Hanselmann
      # time is a work-around.
7865 5bc556dd Michael Hanselmann
      time.sleep(5)
7866 5bc556dd Michael Hanselmann
7867 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
7868 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
7869 6605411d Iustin Pop
    if self.op.wait_for_sync:
7870 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
7871 6605411d Iustin Pop
      if disk_abort:
7872 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
7873 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
7874 8729e0d7 Iustin Pop
7875 8729e0d7 Iustin Pop
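# Editorial note (illustrative only): the required opcode parameters for
# LUGrowDisk mirror its _OP_REQP list above; a grow request would carry
# values along these lines (hypothetical instance name, amount in MiB):
#
#   {"instance_name": "instance1.example.com",
#    "disk": 0,                 # index of the disk to grow
#    "amount": 1024,            # MiB to add to the current size
#    "wait_for_sync": True}     # block until the mirror has resynced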
7876 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
7877 a8083063 Iustin Pop
  """Query runtime instance data.
7878 a8083063 Iustin Pop

7879 a8083063 Iustin Pop
  """
7880 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
7881 a987fa48 Guido Trotter
  REQ_BGL = False
7882 ae5849b5 Michael Hanselmann
7883 a987fa48 Guido Trotter
  def ExpandNames(self):
7884 a987fa48 Guido Trotter
    self.needed_locks = {}
7885 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
7886 a987fa48 Guido Trotter
7887 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
7888 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
7889 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7890 a987fa48 Guido Trotter
7891 a987fa48 Guido Trotter
    if self.op.instances:
7892 a987fa48 Guido Trotter
      self.wanted_names = []
7893 a987fa48 Guido Trotter
      for name in self.op.instances:
7894 cf26a87a Iustin Pop
        full_name = _ExpandInstanceName(self.cfg, name)
7895 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
7896 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
7897 a987fa48 Guido Trotter
    else:
7898 a987fa48 Guido Trotter
      self.wanted_names = None
7899 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
7900 a987fa48 Guido Trotter
7901 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7902 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7903 a987fa48 Guido Trotter
7904 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
7905 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
7906 a987fa48 Guido Trotter
      self._LockInstancesNodes()
7907 a8083063 Iustin Pop
7908 a8083063 Iustin Pop
  def CheckPrereq(self):
7909 a8083063 Iustin Pop
    """Check prerequisites.
7910 a8083063 Iustin Pop

7911 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
7912 a8083063 Iustin Pop

7913 a8083063 Iustin Pop
    """
7914 a987fa48 Guido Trotter
    if self.wanted_names is None:
7915 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
7916 a8083063 Iustin Pop
7917 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
7918 a987fa48 Guido Trotter
                             in self.wanted_names]
7919 a987fa48 Guido Trotter
    return
7920 a8083063 Iustin Pop
7921 98825740 Michael Hanselmann
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
7922 98825740 Michael Hanselmann
    """Returns the status of a block device
7923 98825740 Michael Hanselmann

7924 98825740 Michael Hanselmann
    """
7925 4dce1a83 Michael Hanselmann
    if self.op.static or not node:
7926 98825740 Michael Hanselmann
      return None
7927 98825740 Michael Hanselmann
7928 98825740 Michael Hanselmann
    self.cfg.SetDiskID(dev, node)
7929 98825740 Michael Hanselmann
7930 98825740 Michael Hanselmann
    result = self.rpc.call_blockdev_find(node, dev)
7931 98825740 Michael Hanselmann
    if result.offline:
7932 98825740 Michael Hanselmann
      return None
7933 98825740 Michael Hanselmann
7934 98825740 Michael Hanselmann
    result.Raise("Can't compute disk status for %s" % instance_name)
7935 98825740 Michael Hanselmann
7936 98825740 Michael Hanselmann
    status = result.payload
7937 ddfe2228 Michael Hanselmann
    if status is None:
7938 ddfe2228 Michael Hanselmann
      return None
7939 98825740 Michael Hanselmann
7940 98825740 Michael Hanselmann
    return (status.dev_path, status.major, status.minor,
7941 98825740 Michael Hanselmann
            status.sync_percent, status.estimated_time,
7942 f208978a Michael Hanselmann
            status.is_degraded, status.ldisk_status)
7943 98825740 Michael Hanselmann
7944 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
7945 a8083063 Iustin Pop
    """Compute block device status.
7946 a8083063 Iustin Pop

7947 a8083063 Iustin Pop
    """
7948 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
7949 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
7950 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
7951 a8083063 Iustin Pop
        snode = dev.logical_id[1]
7952 a8083063 Iustin Pop
      else:
7953 a8083063 Iustin Pop
        snode = dev.logical_id[0]
7954 a8083063 Iustin Pop
7955 98825740 Michael Hanselmann
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
7956 98825740 Michael Hanselmann
                                              instance.name, dev)
7957 98825740 Michael Hanselmann
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
7958 a8083063 Iustin Pop
7959 a8083063 Iustin Pop
    if dev.children:
7960 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
7961 a8083063 Iustin Pop
                      for child in dev.children]
7962 a8083063 Iustin Pop
    else:
7963 a8083063 Iustin Pop
      dev_children = []
7964 a8083063 Iustin Pop
7965 a8083063 Iustin Pop
    data = {
7966 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
7967 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
7968 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
7969 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
7970 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
7971 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
7972 a8083063 Iustin Pop
      "children": dev_children,
7973 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
7974 c98162a7 Iustin Pop
      "size": dev.size,
7975 a8083063 Iustin Pop
      }
7976 a8083063 Iustin Pop
7977 a8083063 Iustin Pop
    return data
7978 a8083063 Iustin Pop
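  # Editorial note (illustrative only): for a DRBD disk the dict built above
  # nests one level per child LV via "children"; pstatus/sstatus are either
  # None (static query, missing or offline node, device not found) or the
  # tuple assembled by _ComputeBlockdevStatus:
  #
  #   (dev_path, major, minor, sync_percent, estimated_time,
  #    is_degraded, ldisk_status)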
7979 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
7980 a8083063 Iustin Pop
    """Gather and return data"""
7981 a8083063 Iustin Pop
    result = {}
7982 338e51e8 Iustin Pop
7983 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
7984 338e51e8 Iustin Pop
7985 a8083063 Iustin Pop
    for instance in self.wanted_instances:
7986 57821cac Iustin Pop
      if not self.op.static:
7987 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
7988 57821cac Iustin Pop
                                                  instance.name,
7989 57821cac Iustin Pop
                                                  instance.hypervisor)
7990 4c4e4e1e Iustin Pop
        remote_info.Raise("Error checking node %s" % instance.primary_node)
7991 7ad1af4a Iustin Pop
        remote_info = remote_info.payload
7992 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
7993 57821cac Iustin Pop
          remote_state = "up"
7994 57821cac Iustin Pop
        else:
7995 57821cac Iustin Pop
          remote_state = "down"
7996 a8083063 Iustin Pop
      else:
7997 57821cac Iustin Pop
        remote_state = None
7998 0d68c45d Iustin Pop
      if instance.admin_up:
7999 a8083063 Iustin Pop
        config_state = "up"
8000 0d68c45d Iustin Pop
      else:
8001 0d68c45d Iustin Pop
        config_state = "down"
8002 a8083063 Iustin Pop
8003 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
8004 a8083063 Iustin Pop
               for device in instance.disks]
8005 a8083063 Iustin Pop
8006 a8083063 Iustin Pop
      idict = {
8007 a8083063 Iustin Pop
        "name": instance.name,
8008 a8083063 Iustin Pop
        "config_state": config_state,
8009 a8083063 Iustin Pop
        "run_state": remote_state,
8010 a8083063 Iustin Pop
        "pnode": instance.primary_node,
8011 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
8012 a8083063 Iustin Pop
        "os": instance.os,
8013 0b13832c Guido Trotter
        # this happens to be the same format used for hooks
8014 0b13832c Guido Trotter
        "nics": _NICListToTuple(self, instance.nics),
8015 a8083063 Iustin Pop
        "disks": disks,
8016 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
8017 24838135 Iustin Pop
        "network_port": instance.network_port,
8018 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
8019 7736a5f2 Iustin Pop
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
8020 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
8021 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
8022 90f72445 Iustin Pop
        "serial_no": instance.serial_no,
8023 90f72445 Iustin Pop
        "mtime": instance.mtime,
8024 90f72445 Iustin Pop
        "ctime": instance.ctime,
8025 033d58b0 Iustin Pop
        "uuid": instance.uuid,
8026 a8083063 Iustin Pop
        }
8027 a8083063 Iustin Pop
8028 a8083063 Iustin Pop
      result[instance.name] = idict
8029 a8083063 Iustin Pop
8030 a8083063 Iustin Pop
    return result
8031 a8083063 Iustin Pop
8032 a8083063 Iustin Pop
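# Editorial note (illustrative only): LUQueryInstanceData.Exec above returns a
# dict keyed by instance name; a single (hypothetical) entry looks roughly
# like:
#
#   {"instance1.example.com": {
#      "name": "instance1.example.com",
#      "config_state": "up",           # from instance.admin_up
#      "run_state": "up",              # None when static=True
#      "pnode": "node1.example.com",
#      "snodes": ["node2.example.com"],
#      "os": "debootstrap",
#      "nics": [...],                  # same tuple format as the hooks env
#      "disks": [...],                 # output of _ComputeDiskStatus
#      ...                             # hv/be params, serial_no, timestamps
#      }}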
8033 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
8034 a8083063 Iustin Pop
  """Modifies an instances's parameters.
8035 a8083063 Iustin Pop

8036 a8083063 Iustin Pop
  """
8037 a8083063 Iustin Pop
  HPATH = "instance-modify"
8038 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
8039 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
8040 1a5c7281 Guido Trotter
  REQ_BGL = False
8041 1a5c7281 Guido Trotter
8042 24991749 Iustin Pop
  def CheckArguments(self):
8043 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
8044 24991749 Iustin Pop
      self.op.nics = []
8045 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
8046 24991749 Iustin Pop
      self.op.disks = []
8047 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
8048 24991749 Iustin Pop
      self.op.beparams = {}
8049 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
8050 24991749 Iustin Pop
      self.op.hvparams = {}
8051 e29e9550 Iustin Pop
    if not hasattr(self.op, "disk_template"):
8052 e29e9550 Iustin Pop
      self.op.disk_template = None
8053 e29e9550 Iustin Pop
    if not hasattr(self.op, "remote_node"):
8054 e29e9550 Iustin Pop
      self.op.remote_node = None
8055 96b39bcc Iustin Pop
    if not hasattr(self.op, "os_name"):
8056 96b39bcc Iustin Pop
      self.op.os_name = None
8057 96b39bcc Iustin Pop
    if not hasattr(self.op, "force_variant"):
8058 96b39bcc Iustin Pop
      self.op.force_variant = False
8059 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
8060 e29e9550 Iustin Pop
    if not (self.op.nics or self.op.disks or self.op.disk_template or
8061 96b39bcc Iustin Pop
            self.op.hvparams or self.op.beparams or self.op.os_name):
8062 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
8063 24991749 Iustin Pop
8064 7736a5f2 Iustin Pop
    if self.op.hvparams:
8065 7736a5f2 Iustin Pop
      _CheckGlobalHvParams(self.op.hvparams)
8066 7736a5f2 Iustin Pop
8067 24991749 Iustin Pop
    # Disk validation
8068 24991749 Iustin Pop
    disk_addremove = 0
8069 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
8070 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8071 24991749 Iustin Pop
        disk_addremove += 1
8072 24991749 Iustin Pop
        continue
8073 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
8074 24991749 Iustin Pop
        disk_addremove += 1
8075 24991749 Iustin Pop
      else:
8076 24991749 Iustin Pop
        if not isinstance(disk_op, int):
8077 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
8078 8b46606c Guido Trotter
        if not isinstance(disk_dict, dict):
8079 8b46606c Guido Trotter
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
8080 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
8081 8b46606c Guido Trotter
8082 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
8083 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
8084 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
8085 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
8086 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8087 24991749 Iustin Pop
        size = disk_dict.get('size', None)
8088 24991749 Iustin Pop
        if size is None:
8089 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing",
8090 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8091 24991749 Iustin Pop
        try:
8092 24991749 Iustin Pop
          size = int(size)
8093 691744c4 Iustin Pop
        except (TypeError, ValueError), err:
8094 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
8095 5c983ee5 Iustin Pop
                                     str(err), errors.ECODE_INVAL)
8096 24991749 Iustin Pop
        disk_dict['size'] = size
8097 24991749 Iustin Pop
      else:
8098 24991749 Iustin Pop
        # modification of disk
8099 24991749 Iustin Pop
        if 'size' in disk_dict:
8100 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
8101 5c983ee5 Iustin Pop
                                     " grow-disk", errors.ECODE_INVAL)
8102 24991749 Iustin Pop
8103 24991749 Iustin Pop
    if disk_addremove > 1:
8104 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
8105 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
8106 24991749 Iustin Pop
8107 e29e9550 Iustin Pop
    if self.op.disks and self.op.disk_template is not None:
8108 e29e9550 Iustin Pop
      raise errors.OpPrereqError("Disk template conversion and other disk"
8109 e29e9550 Iustin Pop
                                 " changes not supported at the same time",
8110 e29e9550 Iustin Pop
                                 errors.ECODE_INVAL)
8111 e29e9550 Iustin Pop
8112 e29e9550 Iustin Pop
    if self.op.disk_template:
8113 e29e9550 Iustin Pop
      _CheckDiskTemplate(self.op.disk_template)
8114 e29e9550 Iustin Pop
      if (self.op.disk_template in constants.DTS_NET_MIRROR and
8115 e29e9550 Iustin Pop
          self.op.remote_node is None):
8116 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Changing the disk template to a mirrored"
8117 e29e9550 Iustin Pop
                                   " one requires specifying a secondary node",
8118 e29e9550 Iustin Pop
                                   errors.ECODE_INVAL)
8119 e29e9550 Iustin Pop
8120 24991749 Iustin Pop
    # NIC validation
8121 24991749 Iustin Pop
    nic_addremove = 0
8122 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8123 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8124 24991749 Iustin Pop
        nic_addremove += 1
8125 24991749 Iustin Pop
        continue
8126 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
8127 24991749 Iustin Pop
        nic_addremove += 1
8128 24991749 Iustin Pop
      else:
8129 24991749 Iustin Pop
        if not isinstance(nic_op, int):
8130 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
8131 8b46606c Guido Trotter
        if not isinstance(nic_dict, dict):
8132 8b46606c Guido Trotter
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
8133 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
8134 24991749 Iustin Pop
8135 24991749 Iustin Pop
      # nic_dict should be a dict
8136 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
8137 24991749 Iustin Pop
      if nic_ip is not None:
8138 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
8139 24991749 Iustin Pop
          nic_dict['ip'] = None
8140 24991749 Iustin Pop
        else:
8141 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
8142 5c983ee5 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
8143 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
8144 5c44da6a Guido Trotter
8145 cd098c41 Guido Trotter
      nic_bridge = nic_dict.get('bridge', None)
8146 cd098c41 Guido Trotter
      nic_link = nic_dict.get('link', None)
8147 cd098c41 Guido Trotter
      if nic_bridge and nic_link:
8148 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
8149 5c983ee5 Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
8150 cd098c41 Guido Trotter
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
8151 cd098c41 Guido Trotter
        nic_dict['bridge'] = None
8152 cd098c41 Guido Trotter
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
8153 cd098c41 Guido Trotter
        nic_dict['link'] = None
8154 cd098c41 Guido Trotter
8155 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
8156 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
8157 5c44da6a Guido Trotter
        if nic_mac is None:
8158 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
8159 5c44da6a Guido Trotter
8160 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
8161 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
8162 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8163 82187135 René Nussbaumer
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
8164 82187135 René Nussbaumer
8165 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
8166 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
8167 5c983ee5 Iustin Pop
                                     " modifying an existing nic",
8168 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8169 5c44da6a Guido Trotter
8170 24991749 Iustin Pop
    if nic_addremove > 1:
8171 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
8172 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
8173 24991749 Iustin Pop
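  # Editorial note (illustrative only): after the validation above, well-formed
  # modification lists look like the following (hypothetical values, size in
  # MiB); each entry is (DDM_ADD | DDM_REMOVE | index, parameters dict), with
  # at most one add/remove per opcode for disks and likewise for NICs:
  #
  #   disks = [(constants.DDM_ADD,
  #             {"size": 1024, "mode": constants.DISK_RDWR})]
  #   nics  = [(0, {"mac": constants.VALUE_GENERATE})]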
8174 1a5c7281 Guido Trotter
  def ExpandNames(self):
8175 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
8176 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
8177 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8178 74409b12 Iustin Pop
8179 74409b12 Iustin Pop
  def DeclareLocks(self, level):
8180 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
8181 74409b12 Iustin Pop
      self._LockInstancesNodes()
8182 e29e9550 Iustin Pop
      if self.op.disk_template and self.op.remote_node:
8183 e29e9550 Iustin Pop
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8184 e29e9550 Iustin Pop
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
8185 a8083063 Iustin Pop
8186 a8083063 Iustin Pop
  def BuildHooksEnv(self):
8187 a8083063 Iustin Pop
    """Build hooks env.
8188 a8083063 Iustin Pop

8189 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
8190 a8083063 Iustin Pop

8191 a8083063 Iustin Pop
    """
8192 396e1b78 Michael Hanselmann
    args = dict()
8193 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
8194 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
8195 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
8196 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
8197 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
8198 d8dcf3c9 Guido Trotter
    # information at all.
8199 d8dcf3c9 Guido Trotter
    if self.op.nics:
8200 d8dcf3c9 Guido Trotter
      args['nics'] = []
8201 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
8202 62f0dd02 Guido Trotter
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
8203 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
8204 d8dcf3c9 Guido Trotter
        if idx in nic_override:
8205 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
8206 d8dcf3c9 Guido Trotter
        else:
8207 d8dcf3c9 Guido Trotter
          this_nic_override = {}
8208 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
8209 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
8210 d8dcf3c9 Guido Trotter
        else:
8211 d8dcf3c9 Guido Trotter
          ip = nic.ip
8212 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
8213 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
8214 d8dcf3c9 Guido Trotter
        else:
8215 d8dcf3c9 Guido Trotter
          mac = nic.mac
8216 62f0dd02 Guido Trotter
        if idx in self.nic_pnew:
8217 62f0dd02 Guido Trotter
          nicparams = self.nic_pnew[idx]
8218 62f0dd02 Guido Trotter
        else:
8219 62f0dd02 Guido Trotter
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
8220 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
8221 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
8222 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
8223 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
8224 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
8225 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
8226 62f0dd02 Guido Trotter
        nicparams = self.nic_pnew[constants.DDM_ADD]
8227 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
8228 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
8229 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
8230 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
8231 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
8232 d8dcf3c9 Guido Trotter
8233 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
8234 e29e9550 Iustin Pop
    if self.op.disk_template:
8235 e29e9550 Iustin Pop
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
8236 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
8237 a8083063 Iustin Pop
    return env, nl, nl
8238 a8083063 Iustin Pop
8239 7e950d31 Iustin Pop
  @staticmethod
8240 7e950d31 Iustin Pop
  def _GetUpdatedParams(old_params, update_dict,
8241 0329617a Guido Trotter
                        default_values, parameter_types):
8242 0329617a Guido Trotter
    """Return the new params dict for the given params.
8243 0329617a Guido Trotter

8244 0329617a Guido Trotter
    @type old_params: dict
8245 f2fd87d7 Iustin Pop
    @param old_params: old parameters
8246 0329617a Guido Trotter
    @type update_dict: dict
8247 f2fd87d7 Iustin Pop
    @param update_dict: dict containing new parameter values,
8248 f2fd87d7 Iustin Pop
                        or constants.VALUE_DEFAULT to reset the
8249 f2fd87d7 Iustin Pop
                        parameter to its default value
8250 0329617a Guido Trotter
    @type default_values: dict
8251 0329617a Guido Trotter
    @param default_values: default values for the filled parameters
8252 0329617a Guido Trotter
    @type parameter_types: dict
8253 0329617a Guido Trotter
    @param parameter_types: dict mapping target dict keys to types
8254 0329617a Guido Trotter
                            in constants.ENFORCEABLE_TYPES
8255 0329617a Guido Trotter
    @rtype: (dict, dict)
8256 0329617a Guido Trotter
    @return: (new_parameters, filled_parameters)
8257 0329617a Guido Trotter

8258 0329617a Guido Trotter
    """
8259 0329617a Guido Trotter
    params_copy = copy.deepcopy(old_params)
8260 0329617a Guido Trotter
    for key, val in update_dict.iteritems():
8261 0329617a Guido Trotter
      if val == constants.VALUE_DEFAULT:
8262 0329617a Guido Trotter
        try:
8263 0329617a Guido Trotter
          del params_copy[key]
8264 0329617a Guido Trotter
        except KeyError:
8265 0329617a Guido Trotter
          pass
8266 0329617a Guido Trotter
      else:
8267 0329617a Guido Trotter
        params_copy[key] = val
8268 0329617a Guido Trotter
    utils.ForceDictType(params_copy, parameter_types)
8269 0329617a Guido Trotter
    params_filled = objects.FillDict(default_values, params_copy)
8270 0329617a Guido Trotter
    return (params_copy, params_filled)
8271 0329617a Guido Trotter
8272 a8083063 Iustin Pop
  def CheckPrereq(self):
8273 a8083063 Iustin Pop
    """Check prerequisites.
8274 a8083063 Iustin Pop

8275 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
8276 a8083063 Iustin Pop

8277 a8083063 Iustin Pop
    """
8278 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
8279 a8083063 Iustin Pop
8280 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
8281 31a853d2 Iustin Pop
8282 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8283 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
8284 1a5c7281 Guido Trotter
    assert self.instance is not None, \
8285 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
8286 6b12959c Iustin Pop
    pnode = instance.primary_node
8287 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
8288 74409b12 Iustin Pop
8289 e29e9550 Iustin Pop
    if self.op.disk_template:
8290 e29e9550 Iustin Pop
      if instance.disk_template == self.op.disk_template:
8291 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Instance already has disk template %s" %
8292 e29e9550 Iustin Pop
                                   instance.disk_template, errors.ECODE_INVAL)
8293 e29e9550 Iustin Pop
8294 e29e9550 Iustin Pop
      if (instance.disk_template,
8295 e29e9550 Iustin Pop
          self.op.disk_template) not in self._DISK_CONVERSIONS:
8296 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Unsupported disk template conversion from"
8297 e29e9550 Iustin Pop
                                   " %s to %s" % (instance.disk_template,
8298 e29e9550 Iustin Pop
                                                  self.op.disk_template),
8299 e29e9550 Iustin Pop
                                   errors.ECODE_INVAL)
8300 e29e9550 Iustin Pop
      if self.op.disk_template in constants.DTS_NET_MIRROR:
8301 e29e9550 Iustin Pop
        _CheckNodeOnline(self, self.op.remote_node)
8302 e29e9550 Iustin Pop
        _CheckNodeNotDrained(self, self.op.remote_node)
8303 e29e9550 Iustin Pop
        disks = [{"size": d.size} for d in instance.disks]
8304 e29e9550 Iustin Pop
        required = _ComputeDiskSize(self.op.disk_template, disks)
8305 e29e9550 Iustin Pop
        _CheckNodesFreeDisk(self, [self.op.remote_node], required)
8306 e29e9550 Iustin Pop
        _CheckInstanceDown(self, instance, "cannot change disk template")
8307 e29e9550 Iustin Pop
8308 338e51e8 Iustin Pop
    # hvparams processing
8309 74409b12 Iustin Pop
    if self.op.hvparams:
8310 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
8311 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
8312 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
8313 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
8314 74409b12 Iustin Pop
      # local check
8315 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
8316 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
8317 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
8318 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
8319 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
8320 338e51e8 Iustin Pop
    else:
8321 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
8322 338e51e8 Iustin Pop
8323 338e51e8 Iustin Pop
    # beparams processing
8324 338e51e8 Iustin Pop
    if self.op.beparams:
8325 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
8326 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
8327 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
8328 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
8329 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
8330 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
8331 338e51e8 Iustin Pop
    else:
8332 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
8333 74409b12 Iustin Pop
8334 cfefe007 Guido Trotter
    self.warn = []
8335 647a5d80 Iustin Pop
8336 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
8337 647a5d80 Iustin Pop
      mem_check_list = [pnode]
8338 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
8339 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
8340 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
8341 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
8342 72737a7f Iustin Pop
                                                  instance.hypervisor)
8343 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
8344 72737a7f Iustin Pop
                                         instance.hypervisor)
8345 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
8346 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
8347 070e998b Iustin Pop
      if msg:
8348 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
8349 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
8350 070e998b Iustin Pop
                         (pnode, msg))
8351 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
8352 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
8353 070e998b Iustin Pop
                         " free memory information" % pnode)
8354 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
8355 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
8356 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
8357 cfefe007 Guido Trotter
      else:
8358 7ad1af4a Iustin Pop
        if instance_info.payload:
8359 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
8360 cfefe007 Guido Trotter
        else:
8361 cfefe007 Guido Trotter
          # Assume instance not running
8362 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
8363 cfefe007 Guido Trotter
          # and we have no other way to check)
8364 cfefe007 Guido Trotter
          current_mem = 0
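        # memory shortfall: the requested amount, minus what the (possibly
        # running) instance already uses, minus what the node reports as free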
8365 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
8366 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
8367 cfefe007 Guido Trotter
        if miss_mem > 0:
8368 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
8369 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
8370 5c983ee5 Iustin Pop
                                     " missing on its primary node" % miss_mem,
8371 5c983ee5 Iustin Pop
                                     errors.ECODE_NORES)
8372 cfefe007 Guido Trotter
8373 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
8374 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
8375 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
8376 ea33068f Iustin Pop
            continue
8377 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
8378 070e998b Iustin Pop
          if msg:
8379 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
8380 070e998b Iustin Pop
                             (node, msg))
8381 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
8382 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
8383 070e998b Iustin Pop
                             " memory information" % node)
8384 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
8385 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
8386 647a5d80 Iustin Pop
                             " secondary node %s" % node)
8387 5bc84f33 Alexander Schreiber
8388 24991749 Iustin Pop
    # NIC processing
8389 cd098c41 Guido Trotter
    self.nic_pnew = {}
8390 cd098c41 Guido Trotter
    self.nic_pinst = {}
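    # each entry is (nic_op, nic_dict): nic_op is either the index of an
    # existing NIC or one of constants.DDM_ADD / constants.DDM_REMOVE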
8391 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8392 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8393 24991749 Iustin Pop
        if not instance.nics:
8394 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
8395 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8396 24991749 Iustin Pop
        continue
8397 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
8398 24991749 Iustin Pop
        # an existing nic
8399 21bcb9aa Michael Hanselmann
        if not instance.nics:
8400 21bcb9aa Michael Hanselmann
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
8401 21bcb9aa Michael Hanselmann
                                     " no NICs" % nic_op,
8402 21bcb9aa Michael Hanselmann
                                     errors.ECODE_INVAL)
8403 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
8404 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
8405 24991749 Iustin Pop
                                     " are 0 to %d" %
8406 21bcb9aa Michael Hanselmann
                                     (nic_op, len(instance.nics) - 1),
8407 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8408 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
8409 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
8410 cd098c41 Guido Trotter
      else:
8411 cd098c41 Guido Trotter
        old_nic_params = {}
8412 cd098c41 Guido Trotter
        old_nic_ip = None
8413 cd098c41 Guido Trotter
8414 cd098c41 Guido Trotter
      update_params_dict = dict([(key, nic_dict[key])
8415 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
8416 cd098c41 Guido Trotter
                                 if key in nic_dict])
8417 cd098c41 Guido Trotter
8418 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
8419 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
8420 cd098c41 Guido Trotter
8421 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
8422 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
8423 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
8424 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
8425 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
8426 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
8427 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
8428 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
8429 cd098c41 Guido Trotter
8430 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
8431 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
8432 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
8433 35c0c8da Iustin Pop
        if msg:
8434 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
8435 24991749 Iustin Pop
          if self.force:
8436 24991749 Iustin Pop
            self.warn.append(msg)
8437 24991749 Iustin Pop
          else:
8438 5c983ee5 Iustin Pop
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
8439 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
8440 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
8441 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
8442 cd098c41 Guido Trotter
        else:
8443 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
8444 cd098c41 Guido Trotter
        if nic_ip is None:
8445 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
8446 5c983ee5 Iustin Pop
                                     ' on a routed nic', errors.ECODE_INVAL)
8447 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
8448 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
8449 5c44da6a Guido Trotter
        if nic_mac is None:
8450 5c983ee5 Iustin Pop
          raise errors.OpPrereqError('Cannot set the nic mac to None',
8451 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8452 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8453 5c44da6a Guido Trotter
          # otherwise generate the mac
8454 36b66e6e Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
8455 5c44da6a Guido Trotter
        else:
8456 5c44da6a Guido Trotter
          # or validate/reserve the current one
8457 36b66e6e Guido Trotter
          try:
8458 36b66e6e Guido Trotter
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
8459 36b66e6e Guido Trotter
          except errors.ReservationError:
8460 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
8461 5c983ee5 Iustin Pop
                                       " in cluster" % nic_mac,
8462 5c983ee5 Iustin Pop
                                       errors.ECODE_NOTUNIQUE)
8463 24991749 Iustin Pop
8464 24991749 Iustin Pop
    # DISK processing
8465 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
8466 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
8467 5c983ee5 Iustin Pop
                                 " diskless instances",
8468 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
8469 1122eb25 Iustin Pop
    for disk_op, _ in self.op.disks:
8470 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8471 24991749 Iustin Pop
        if len(instance.disks) == 1:
8472 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
8473 31624382 Iustin Pop
                                     " an instance", errors.ECODE_INVAL)
8474 31624382 Iustin Pop
        _CheckInstanceDown(self, instance, "cannot remove disks")
8475 24991749 Iustin Pop
8476 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
8477 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
8478 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
8479 5c983ee5 Iustin Pop
                                   " add more" % constants.MAX_DISKS,
8480 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
8481 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
8482 24991749 Iustin Pop
        # an existing disk
8483 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
8484 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
8485 24991749 Iustin Pop
                                     " are 0 to %d" %
8486 5c983ee5 Iustin Pop
                                     (disk_op, len(instance.disks) - 1),
8487 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8488 24991749 Iustin Pop
8489 96b39bcc Iustin Pop
    # OS change
8490 96b39bcc Iustin Pop
    if self.op.os_name and not self.op.force:
8491 96b39bcc Iustin Pop
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
8492 96b39bcc Iustin Pop
                      self.op.force_variant)
8493 96b39bcc Iustin Pop
8494 a8083063 Iustin Pop
    return
8495 a8083063 Iustin Pop
8496 e29e9550 Iustin Pop
  def _ConvertPlainToDrbd(self, feedback_fn):
8497 e29e9550 Iustin Pop
    """Converts an instance from plain to drbd.
8498 e29e9550 Iustin Pop

8499 e29e9550 Iustin Pop
    """
8500 e29e9550 Iustin Pop
    feedback_fn("Converting template to drbd")
8501 e29e9550 Iustin Pop
    instance = self.instance
8502 e29e9550 Iustin Pop
    pnode = instance.primary_node
8503 e29e9550 Iustin Pop
    snode = self.op.remote_node
8504 e29e9550 Iustin Pop
8505 e29e9550 Iustin Pop
    # create a fake disk info for _GenerateDiskTemplate
8506 e29e9550 Iustin Pop
    disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
8507 e29e9550 Iustin Pop
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
8508 e29e9550 Iustin Pop
                                      instance.name, pnode, [snode],
8509 e29e9550 Iustin Pop
                                      disk_info, None, None, 0)
8510 e29e9550 Iustin Pop
    info = _GetInstanceInfoText(instance)
8511 e29e9550 Iustin Pop
    feedback_fn("Creating aditional volumes...")
8512 e29e9550 Iustin Pop
    # first, create the missing data and meta devices
8513 e29e9550 Iustin Pop
    for disk in new_disks:
8514 e29e9550 Iustin Pop
      # unfortunately this is... not too nice
8515 e29e9550 Iustin Pop
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
8516 e29e9550 Iustin Pop
                            info, True)
8517 e29e9550 Iustin Pop
      for child in disk.children:
8518 e29e9550 Iustin Pop
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
8519 e29e9550 Iustin Pop
    # at this stage, all new LVs have been created, we can rename the
8520 e29e9550 Iustin Pop
    # old ones
8521 e29e9550 Iustin Pop
    feedback_fn("Renaming original volumes...")
8522 e29e9550 Iustin Pop
    rename_list = [(o, n.children[0].logical_id)
8523 e29e9550 Iustin Pop
                   for (o, n) in zip(instance.disks, new_disks)]
8524 e29e9550 Iustin Pop
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
8525 e29e9550 Iustin Pop
    result.Raise("Failed to rename original LVs")
8526 e29e9550 Iustin Pop
8527 e29e9550 Iustin Pop
    feedback_fn("Initializing DRBD devices...")
8528 e29e9550 Iustin Pop
    # all child devices are in place, we can now create the DRBD devices
8529 e29e9550 Iustin Pop
    for disk in new_disks:
8530 e29e9550 Iustin Pop
      for node in [pnode, snode]:
8531 e29e9550 Iustin Pop
        f_create = node == pnode
8532 e29e9550 Iustin Pop
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
8533 e29e9550 Iustin Pop
8534 e29e9550 Iustin Pop
    # at this point, the instance has been modified
8535 e29e9550 Iustin Pop
    instance.disk_template = constants.DT_DRBD8
8536 e29e9550 Iustin Pop
    instance.disks = new_disks
8537 e29e9550 Iustin Pop
    self.cfg.Update(instance, feedback_fn)
8538 e29e9550 Iustin Pop
8539 e29e9550 Iustin Pop
    # disks are created, waiting for sync
8540 e29e9550 Iustin Pop
    disk_abort = not _WaitForSync(self, instance)
8541 e29e9550 Iustin Pop
    if disk_abort:
8542 e29e9550 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
8543 e29e9550 Iustin Pop
                               " this instance, please cleanup manually")
8544 e29e9550 Iustin Pop
8545 2f414c48 Iustin Pop
  def _ConvertDrbdToPlain(self, feedback_fn):
8546 2f414c48 Iustin Pop
    """Converts an instance from drbd to plain.
8547 2f414c48 Iustin Pop

8548 2f414c48 Iustin Pop
    """
8549 2f414c48 Iustin Pop
    instance = self.instance
8550 2f414c48 Iustin Pop
    assert len(instance.secondary_nodes) == 1
8551 2f414c48 Iustin Pop
    pnode = instance.primary_node
8552 2f414c48 Iustin Pop
    snode = instance.secondary_nodes[0]
8553 2f414c48 Iustin Pop
    feedback_fn("Converting template to plain")
8554 2f414c48 Iustin Pop
8555 2f414c48 Iustin Pop
    old_disks = instance.disks
8556 2f414c48 Iustin Pop
    new_disks = [d.children[0] for d in old_disks]
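    # for DRBD8-based disks, children[0] is the data LV and children[1] the
    # metadata LV; keeping only the data child turns each disk into a plain LV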
8557 2f414c48 Iustin Pop
8558 2f414c48 Iustin Pop
    # copy over size and mode
8559 2f414c48 Iustin Pop
    for parent, child in zip(old_disks, new_disks):
8560 2f414c48 Iustin Pop
      child.size = parent.size
8561 2f414c48 Iustin Pop
      child.mode = parent.mode
8562 2f414c48 Iustin Pop
8563 2f414c48 Iustin Pop
    # update instance structure
8564 2f414c48 Iustin Pop
    instance.disks = new_disks
8565 2f414c48 Iustin Pop
    instance.disk_template = constants.DT_PLAIN
8566 2f414c48 Iustin Pop
    self.cfg.Update(instance, feedback_fn)
8567 2f414c48 Iustin Pop
8568 2f414c48 Iustin Pop
    feedback_fn("Removing volumes on the secondary node...")
8569 2f414c48 Iustin Pop
    for disk in old_disks:
8570 2f414c48 Iustin Pop
      self.cfg.SetDiskID(disk, snode)
8571 2f414c48 Iustin Pop
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
8572 2f414c48 Iustin Pop
      if msg:
8573 2f414c48 Iustin Pop
        self.LogWarning("Could not remove block device %s on node %s,"
8574 2f414c48 Iustin Pop
                        " continuing anyway: %s", disk.iv_name, snode, msg)
8575 2f414c48 Iustin Pop
8576 2f414c48 Iustin Pop
    feedback_fn("Removing unneeded volumes on the primary node...")
8577 2f414c48 Iustin Pop
    for idx, disk in enumerate(old_disks):
8578 2f414c48 Iustin Pop
      meta = disk.children[1]
8579 2f414c48 Iustin Pop
      self.cfg.SetDiskID(meta, pnode)
8580 2f414c48 Iustin Pop
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
8581 2f414c48 Iustin Pop
      if msg:
8582 2f414c48 Iustin Pop
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
8583 2f414c48 Iustin Pop
                        " continuing anyway: %s", idx, pnode, msg)
8584 2f414c48 Iustin Pop
8585 2f414c48 Iustin Pop
8586 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8587 a8083063 Iustin Pop
    """Modifies an instance.
8588 a8083063 Iustin Pop

8589 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
8590 24991749 Iustin Pop

8591 a8083063 Iustin Pop
    """
8592 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
8593 cfefe007 Guido Trotter
    # feedback_fn there.
8594 cfefe007 Guido Trotter
    for warn in self.warn:
8595 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
8596 cfefe007 Guido Trotter
8597 a8083063 Iustin Pop
    result = []
8598 a8083063 Iustin Pop
    instance = self.instance
8599 24991749 Iustin Pop
    # disk changes
8600 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
8601 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8602 24991749 Iustin Pop
        # remove the last disk
8603 24991749 Iustin Pop
        device = instance.disks.pop()
8604 24991749 Iustin Pop
        device_idx = len(instance.disks)
8605 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
8606 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
8607 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
8608 e1bc0878 Iustin Pop
          if msg:
8609 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
8610 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
8611 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
8612 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
8613 24991749 Iustin Pop
        # add a new disk
8614 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
8615 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
8616 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
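          # new file-based disks are created in the same directory as the
          # instance's existing disks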
8617 24991749 Iustin Pop
        else:
8618 24991749 Iustin Pop
          file_driver = file_path = None
8619 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
8620 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
8621 24991749 Iustin Pop
                                         instance.disk_template,
8622 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
8623 24991749 Iustin Pop
                                         instance.secondary_nodes,
8624 24991749 Iustin Pop
                                         [disk_dict],
8625 24991749 Iustin Pop
                                         file_path,
8626 24991749 Iustin Pop
                                         file_driver,
8627 24991749 Iustin Pop
                                         disk_idx_base)[0]
8628 24991749 Iustin Pop
        instance.disks.append(new_disk)
8629 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
8630 24991749 Iustin Pop
8631 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
8632 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
8633 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
8634 24991749 Iustin Pop
        #HARDCODE
8635 428958aa Iustin Pop
        for node in instance.all_nodes:
8636 428958aa Iustin Pop
          f_create = node == instance.primary_node
8637 796cab27 Iustin Pop
          try:
8638 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
8639 428958aa Iustin Pop
                            f_create, info, f_create)
8640 1492cca7 Iustin Pop
          except errors.OpExecError, err:
8641 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
8642 428958aa Iustin Pop
                            " node %s: %s",
8643 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
8644 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
8645 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
8646 24991749 Iustin Pop
      else:
8647 24991749 Iustin Pop
        # change a given disk
8648 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
8649 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
8650 e29e9550 Iustin Pop
8651 e29e9550 Iustin Pop
    if self.op.disk_template:
8652 e29e9550 Iustin Pop
      r_shut = _ShutdownInstanceDisks(self, instance)
8653 e29e9550 Iustin Pop
      if not r_shut:
8654 e29e9550 Iustin Pop
        raise errors.OpExecError("Cannot shutdow instance disks, unable to"
8655 e29e9550 Iustin Pop
                                 " proceed with disk template conversion")
8656 e29e9550 Iustin Pop
      mode = (instance.disk_template, self.op.disk_template)
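      # dispatch on the (old template, new template) pair; the mapping to the
      # conversion helpers is defined in _DISK_CONVERSIONS at the end of this
      # class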
8657 e29e9550 Iustin Pop
      try:
8658 e29e9550 Iustin Pop
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
8659 e29e9550 Iustin Pop
      except:
8660 e29e9550 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
8661 e29e9550 Iustin Pop
        raise
8662 e29e9550 Iustin Pop
      result.append(("disk_template", self.op.disk_template))
8663 e29e9550 Iustin Pop
8664 24991749 Iustin Pop
    # NIC changes
8665 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8666 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8667 24991749 Iustin Pop
        # remove the last nic
8668 24991749 Iustin Pop
        del instance.nics[-1]
8669 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
8670 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
8671 5c44da6a Guido Trotter
        # mac and bridge should be set, by now
8672 5c44da6a Guido Trotter
        mac = nic_dict['mac']
8673 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
8674 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
8675 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
8676 24991749 Iustin Pop
        instance.nics.append(new_nic)
8677 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
8678 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
8679 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
8680 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
8681 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
8682 cd098c41 Guido Trotter
                       )))
8683 24991749 Iustin Pop
      else:
8684 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
8685 24991749 Iustin Pop
          if key in nic_dict:
8686 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
8687 beabf067 Guido Trotter
        if nic_op in self.nic_pinst:
8688 beabf067 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
8689 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
8690 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
8691 24991749 Iustin Pop
8692 24991749 Iustin Pop
    # hvparams changes
8693 74409b12 Iustin Pop
    if self.op.hvparams:
8694 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
8695 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
8696 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
8697 24991749 Iustin Pop
8698 24991749 Iustin Pop
    # beparams changes
8699 338e51e8 Iustin Pop
    if self.op.beparams:
8700 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
8701 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
8702 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
8703 a8083063 Iustin Pop
8704 96b39bcc Iustin Pop
    # OS change
8705 96b39bcc Iustin Pop
    if self.op.os_name:
8706 96b39bcc Iustin Pop
      instance.os = self.op.os_name
8707 96b39bcc Iustin Pop
8708 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
8709 a8083063 Iustin Pop
8710 a8083063 Iustin Pop
    return result
8711 a8083063 Iustin Pop
8712 e29e9550 Iustin Pop
  _DISK_CONVERSIONS = {
8713 e29e9550 Iustin Pop
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
8714 2f414c48 Iustin Pop
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
8715 e29e9550 Iustin Pop
    }
8716 a8083063 Iustin Pop
8717 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
8718 a8083063 Iustin Pop
  """Query the exports list
8719 a8083063 Iustin Pop

8720 a8083063 Iustin Pop
  """
8721 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
8722 21a15682 Guido Trotter
  REQ_BGL = False
8723 21a15682 Guido Trotter
8724 21a15682 Guido Trotter
  def ExpandNames(self):
8725 21a15682 Guido Trotter
    self.needed_locks = {}
8726 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
8727 21a15682 Guido Trotter
    if not self.op.nodes:
8728 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8729 21a15682 Guido Trotter
    else:
8730 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
8731 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
8732 a8083063 Iustin Pop
8733 a8083063 Iustin Pop
  def CheckPrereq(self):
8734 21a15682 Guido Trotter
    """Check prerequisites.
8735 a8083063 Iustin Pop

8736 a8083063 Iustin Pop
    """
8737 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
8738 a8083063 Iustin Pop
8739 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8740 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
8741 a8083063 Iustin Pop

8742 e4376078 Iustin Pop
    @rtype: dict
8743 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
8744 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
8745 e4376078 Iustin Pop
        that node.
8746 a8083063 Iustin Pop

8747 a8083063 Iustin Pop
    """
8748 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
8749 b04285f2 Guido Trotter
    result = {}
8750 b04285f2 Guido Trotter
    for node in rpcresult:
8751 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
8752 b04285f2 Guido Trotter
        result[node] = False
8753 b04285f2 Guido Trotter
      else:
8754 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
8755 b04285f2 Guido Trotter
8756 b04285f2 Guido Trotter
    return result
8757 a8083063 Iustin Pop
8758 a8083063 Iustin Pop
8759 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
8760 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
8761 a8083063 Iustin Pop

8762 a8083063 Iustin Pop
  """
8763 a8083063 Iustin Pop
  HPATH = "instance-export"
8764 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
8765 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
8766 6657590e Guido Trotter
  REQ_BGL = False
8767 6657590e Guido Trotter
8768 17c3f802 Guido Trotter
  def CheckArguments(self):
8769 17c3f802 Guido Trotter
    """Check the arguments.
8770 17c3f802 Guido Trotter

8771 17c3f802 Guido Trotter
    """
8772 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
8773 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
8774 17c3f802 Guido Trotter
8775 6657590e Guido Trotter
  def ExpandNames(self):
8776 6657590e Guido Trotter
    self._ExpandAndLockInstance()
8777 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
8778 6657590e Guido Trotter
    #
8779 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
8780 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
8781 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
8782 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
8783 6657590e Guido Trotter
    #    then one to remove, after
8784 5bbd3f7f Michael Hanselmann
    #  - removing the removal operation altogether
8785 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8786 6657590e Guido Trotter
8787 6657590e Guido Trotter
  def DeclareLocks(self, level):
8788 6657590e Guido Trotter
    """Last minute lock declaration."""
8789 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
8790 a8083063 Iustin Pop
8791 a8083063 Iustin Pop
  def BuildHooksEnv(self):
8792 a8083063 Iustin Pop
    """Build hooks env.
8793 a8083063 Iustin Pop

8794 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
8795 a8083063 Iustin Pop

8796 a8083063 Iustin Pop
    """
8797 a8083063 Iustin Pop
    env = {
8798 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
8799 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
8800 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
8801 a8083063 Iustin Pop
      }
8802 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8803 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
8804 a8083063 Iustin Pop
          self.op.target_node]
8805 a8083063 Iustin Pop
    return env, nl, nl
8806 a8083063 Iustin Pop
8807 a8083063 Iustin Pop
  def CheckPrereq(self):
8808 a8083063 Iustin Pop
    """Check prerequisites.
8809 a8083063 Iustin Pop

8810 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
8811 a8083063 Iustin Pop

8812 a8083063 Iustin Pop
    """
8813 6657590e Guido Trotter
    instance_name = self.op.instance_name
8814 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
8815 6657590e Guido Trotter
    assert self.instance is not None, \
8816 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
8817 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
8818 a8083063 Iustin Pop
8819 cf26a87a Iustin Pop
    self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
8820 cf26a87a Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
8821 cf26a87a Iustin Pop
    assert self.dst_node is not None
8822 a8083063 Iustin Pop
8823 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
8824 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
8825 a8083063 Iustin Pop
8826 b6023d6c Manuel Franceschini
    # instance disk type verification
8827 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
8828 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
8829 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
8830 5c983ee5 Iustin Pop
                                   " file-based disks", errors.ECODE_INVAL)
8831 b6023d6c Manuel Franceschini
8832 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8833 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
8834 a8083063 Iustin Pop

8835 a8083063 Iustin Pop
    """
8836 a8083063 Iustin Pop
    instance = self.instance
8837 a8083063 Iustin Pop
    dst_node = self.dst_node
8838 a8083063 Iustin Pop
    src_node = instance.primary_node
8839 37972df0 Michael Hanselmann
8840 a8083063 Iustin Pop
    if self.op.shutdown:
8841 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
8842 37972df0 Michael Hanselmann
      feedback_fn("Shutting down instance %s" % instance.name)
8843 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(src_node, instance,
8844 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
8845 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
8846 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
8847 a8083063 Iustin Pop
8848 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
8849 a8083063 Iustin Pop
8850 a8083063 Iustin Pop
    snap_disks = []
8851 a8083063 Iustin Pop
8852 998c712c Iustin Pop
    # set the disks ID correctly since call_instance_start needs the
8853 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
8854 998c712c Iustin Pop
    for disk in instance.disks:
8855 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
8856 998c712c Iustin Pop
8857 3e53a60b Michael Hanselmann
    activate_disks = (not instance.admin_up)
8858 3e53a60b Michael Hanselmann
8859 3e53a60b Michael Hanselmann
    if activate_disks:
8860 3e53a60b Michael Hanselmann
      # Activate the instance disks if we're exporting a stopped instance
8861 3e53a60b Michael Hanselmann
      feedback_fn("Activating disks for %s" % instance.name)
8862 3e53a60b Michael Hanselmann
      _StartInstanceDisks(self, instance, None)
8863 3e53a60b Michael Hanselmann
8864 a8083063 Iustin Pop
    try:
8865 3e53a60b Michael Hanselmann
      # per-disk results
8866 3e53a60b Michael Hanselmann
      dresults = []
8867 3e53a60b Michael Hanselmann
      try:
8868 3e53a60b Michael Hanselmann
        for idx, disk in enumerate(instance.disks):
8869 3e53a60b Michael Hanselmann
          feedback_fn("Creating a snapshot of disk/%s on node %s" %
8870 3e53a60b Michael Hanselmann
                      (idx, src_node))
8871 3e53a60b Michael Hanselmann
8872 3e53a60b Michael Hanselmann
          # result.payload will be a snapshot of an lvm leaf of the one we
8873 3e53a60b Michael Hanselmann
          # passed
8874 3e53a60b Michael Hanselmann
          result = self.rpc.call_blockdev_snapshot(src_node, disk)
8875 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8876 3e53a60b Michael Hanselmann
          if msg:
8877 3e53a60b Michael Hanselmann
            self.LogWarning("Could not snapshot disk/%s on node %s: %s",
8878 3e53a60b Michael Hanselmann
                            idx, src_node, msg)
8879 3e53a60b Michael Hanselmann
            snap_disks.append(False)
8880 3e53a60b Michael Hanselmann
          else:
8881 3e53a60b Michael Hanselmann
            disk_id = (vgname, result.payload)
8882 3e53a60b Michael Hanselmann
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
8883 3e53a60b Michael Hanselmann
                                   logical_id=disk_id, physical_id=disk_id,
8884 3e53a60b Michael Hanselmann
                                   iv_name=disk.iv_name)
8885 3e53a60b Michael Hanselmann
            snap_disks.append(new_dev)
8886 37972df0 Michael Hanselmann
8887 3e53a60b Michael Hanselmann
      finally:
8888 3e53a60b Michael Hanselmann
        if self.op.shutdown and instance.admin_up:
8889 3e53a60b Michael Hanselmann
          feedback_fn("Starting instance %s" % instance.name)
8890 3e53a60b Michael Hanselmann
          result = self.rpc.call_instance_start(src_node, instance, None, None)
8891 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8892 3e53a60b Michael Hanselmann
          if msg:
8893 3e53a60b Michael Hanselmann
            _ShutdownInstanceDisks(self, instance)
8894 3e53a60b Michael Hanselmann
            raise errors.OpExecError("Could not start instance: %s" % msg)
8895 3e53a60b Michael Hanselmann
8896 3e53a60b Michael Hanselmann
      # TODO: check for size
8897 3e53a60b Michael Hanselmann
8898 3e53a60b Michael Hanselmann
      cluster_name = self.cfg.GetClusterName()
8899 3e53a60b Michael Hanselmann
      for idx, dev in enumerate(snap_disks):
8900 3e53a60b Michael Hanselmann
        feedback_fn("Exporting snapshot %s from %s to %s" %
8901 3e53a60b Michael Hanselmann
                    (idx, src_node, dst_node.name))
8902 3e53a60b Michael Hanselmann
        if dev:
8903 4a0e011f Iustin Pop
          # FIXME: pass debug from opcode to backend
8904 3e53a60b Michael Hanselmann
          result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
8905 4a0e011f Iustin Pop
                                                 instance, cluster_name,
8906 dd713605 Iustin Pop
                                                 idx, self.op.debug_level)
8907 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8908 3e53a60b Michael Hanselmann
          if msg:
8909 3e53a60b Michael Hanselmann
            self.LogWarning("Could not export disk/%s from node %s to"
8910 3e53a60b Michael Hanselmann
                            " node %s: %s", idx, src_node, dst_node.name, msg)
8911 3e53a60b Michael Hanselmann
            dresults.append(False)
8912 3e53a60b Michael Hanselmann
          else:
8913 3e53a60b Michael Hanselmann
            dresults.append(True)
8914 3e53a60b Michael Hanselmann
          msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
8915 3e53a60b Michael Hanselmann
          if msg:
8916 3e53a60b Michael Hanselmann
            self.LogWarning("Could not remove snapshot for disk/%d from node"
8917 3e53a60b Michael Hanselmann
                            " %s: %s", idx, src_node, msg)
8918 19d7f90a Guido Trotter
        else:
8919 084f05a5 Iustin Pop
          dresults.append(False)
8920 a8083063 Iustin Pop
8921 3e53a60b Michael Hanselmann
      feedback_fn("Finalizing export on %s" % dst_node.name)
8922 3e53a60b Michael Hanselmann
      result = self.rpc.call_finalize_export(dst_node.name, instance,
8923 3e53a60b Michael Hanselmann
                                             snap_disks)
8924 3e53a60b Michael Hanselmann
      fin_resu = True
8925 3e53a60b Michael Hanselmann
      msg = result.fail_msg
8926 3e53a60b Michael Hanselmann
      if msg:
8927 3e53a60b Michael Hanselmann
        self.LogWarning("Could not finalize export for instance %s"
8928 3e53a60b Michael Hanselmann
                        " on node %s: %s", instance.name, dst_node.name, msg)
8929 3e53a60b Michael Hanselmann
        fin_resu = False
8930 3e53a60b Michael Hanselmann
8931 3e53a60b Michael Hanselmann
    finally:
8932 3e53a60b Michael Hanselmann
      if activate_disks:
8933 3e53a60b Michael Hanselmann
        feedback_fn("Deactivating disks for %s" % instance.name)
8934 3e53a60b Michael Hanselmann
        _ShutdownInstanceDisks(self, instance)
8935 a8083063 Iustin Pop
8936 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
8937 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
8938 a8083063 Iustin Pop
8939 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
8940 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
8941 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
8942 35fbcd11 Iustin Pop
    iname = instance.name
8943 a8083063 Iustin Pop
    if nodelist:
8944 37972df0 Michael Hanselmann
      feedback_fn("Removing old exports for instance %s" % iname)
8945 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
8946 a8083063 Iustin Pop
      for node in exportlist:
8947 4c4e4e1e Iustin Pop
        if exportlist[node].fail_msg:
8948 781de953 Iustin Pop
          continue
8949 35fbcd11 Iustin Pop
        if iname in exportlist[node].payload:
8950 4c4e4e1e Iustin Pop
          msg = self.rpc.call_export_remove(node, iname).fail_msg
8951 35fbcd11 Iustin Pop
          if msg:
8952 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
8953 35fbcd11 Iustin Pop
                            " on node %s: %s", iname, node, msg)
8954 084f05a5 Iustin Pop
    return fin_resu, dresults
8955 5c947f38 Iustin Pop
8956 5c947f38 Iustin Pop
8957 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
8958 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
8959 9ac99fda Guido Trotter

8960 9ac99fda Guido Trotter
  """
8961 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
8962 3656b3af Guido Trotter
  REQ_BGL = False
8963 3656b3af Guido Trotter
8964 3656b3af Guido Trotter
  def ExpandNames(self):
8965 3656b3af Guido Trotter
    self.needed_locks = {}
8966 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
8967 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
8968 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
8969 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8970 9ac99fda Guido Trotter
8971 9ac99fda Guido Trotter
  def CheckPrereq(self):
8972 9ac99fda Guido Trotter
    """Check prerequisites.
8973 9ac99fda Guido Trotter
    """
8974 9ac99fda Guido Trotter
    pass
8975 9ac99fda Guido Trotter
8976 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
8977 9ac99fda Guido Trotter
    """Remove any export.
8978 9ac99fda Guido Trotter

8979 9ac99fda Guido Trotter
    """
8980 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
8981 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
8982 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
8983 9ac99fda Guido Trotter
    fqdn_warn = False
8984 9ac99fda Guido Trotter
    if not instance_name:
8985 9ac99fda Guido Trotter
      fqdn_warn = True
8986 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
8987 9ac99fda Guido Trotter
8988 1b7bfbb7 Iustin Pop
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
8989 1b7bfbb7 Iustin Pop
    exportlist = self.rpc.call_export_list(locked_nodes)
8990 9ac99fda Guido Trotter
    found = False
8991 9ac99fda Guido Trotter
    for node in exportlist:
8992 4c4e4e1e Iustin Pop
      msg = exportlist[node].fail_msg
8993 1b7bfbb7 Iustin Pop
      if msg:
8994 1b7bfbb7 Iustin Pop
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
8995 781de953 Iustin Pop
        continue
8996 1b7bfbb7 Iustin Pop
      if instance_name in exportlist[node].payload:
8997 9ac99fda Guido Trotter
        found = True
8998 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
8999 4c4e4e1e Iustin Pop
        msg = result.fail_msg
9000 35fbcd11 Iustin Pop
        if msg:
9001 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
9002 35fbcd11 Iustin Pop
                        " on node %s: %s", instance_name, node, msg)
9003 9ac99fda Guido Trotter
9004 9ac99fda Guido Trotter
    if fqdn_warn and not found:
9005 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
9006 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
9007 9ac99fda Guido Trotter
                  " Domain Name.")
9008 9ac99fda Guido Trotter
9009 9ac99fda Guido Trotter
9010 fe267188 Iustin Pop
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
9011 5c947f38 Iustin Pop
  """Generic tags LU.
9012 5c947f38 Iustin Pop

9013 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
9014 5c947f38 Iustin Pop

9015 5c947f38 Iustin Pop
  """
9016 5c947f38 Iustin Pop
9017 8646adce Guido Trotter
  def ExpandNames(self):
9018 8646adce Guido Trotter
    self.needed_locks = {}
9019 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
9020 cf26a87a Iustin Pop
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
9021 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
9022 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
9023 cf26a87a Iustin Pop
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
9024 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
9025 8646adce Guido Trotter
9026 8646adce Guido Trotter
  def CheckPrereq(self):
9027 8646adce Guido Trotter
    """Check prerequisites.
9028 8646adce Guido Trotter

9029 8646adce Guido Trotter
    """
9030 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
9031 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
9032 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
9033 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
9034 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
9035 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
9036 5c947f38 Iustin Pop
    else:
9037 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
9038 5c983ee5 Iustin Pop
                                 str(self.op.kind), errors.ECODE_INVAL)
9039 5c947f38 Iustin Pop
9040 5c947f38 Iustin Pop
9041 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
9042 5c947f38 Iustin Pop
  """Returns the tags of a given object.
9043 5c947f38 Iustin Pop

9044 5c947f38 Iustin Pop
  """
9045 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
9046 8646adce Guido Trotter
  REQ_BGL = False
9047 5c947f38 Iustin Pop
9048 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
9049 5c947f38 Iustin Pop
    """Returns the tag list.
9050 5c947f38 Iustin Pop

9051 5c947f38 Iustin Pop
    """
9052 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
9053 5c947f38 Iustin Pop
9054 5c947f38 Iustin Pop
9055 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
9056 73415719 Iustin Pop
  """Searches the tags for a given pattern.
9057 73415719 Iustin Pop

9058 73415719 Iustin Pop
  """
9059 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
9060 8646adce Guido Trotter
  REQ_BGL = False
9061 8646adce Guido Trotter
9062 8646adce Guido Trotter
  def ExpandNames(self):
9063 8646adce Guido Trotter
    self.needed_locks = {}
9064 73415719 Iustin Pop
9065 73415719 Iustin Pop
  def CheckPrereq(self):
9066 73415719 Iustin Pop
    """Check prerequisites.
9067 73415719 Iustin Pop

9068 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
9069 73415719 Iustin Pop

9070 73415719 Iustin Pop
    """
9071 73415719 Iustin Pop
    try:
9072 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
9073 73415719 Iustin Pop
    except re.error, err:
9074 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
9075 5c983ee5 Iustin Pop
                                 (self.op.pattern, err), errors.ECODE_INVAL)
9076 73415719 Iustin Pop
9077 73415719 Iustin Pop
  def Exec(self, feedback_fn):
9078 73415719 Iustin Pop
    """Returns the tag list.
9079 73415719 Iustin Pop

9080 73415719 Iustin Pop
    """
9081 73415719 Iustin Pop
    cfg = self.cfg
9082 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
9083 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
9084 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
9085 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
9086 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
9087 73415719 Iustin Pop
    results = []
9088 73415719 Iustin Pop
    for path, target in tgts:
9089 73415719 Iustin Pop
      for tag in target.GetTags():
9090 73415719 Iustin Pop
        if self.re.search(tag):
9091 73415719 Iustin Pop
          results.append((path, tag))
9092 73415719 Iustin Pop
    return results


class LUAddTags(TagsLU):
  """Sets one or more tags on a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag name and value.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tags.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    self.cfg.Update(self.target, feedback_fn)


class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)), errors.ECODE_NOENT)

  def Exec(self, feedback_fn):
    """Remove the tags from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    self.cfg.Update(self.target, feedback_fn)


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      for node, node_result in result.items():
        node_result.Raise("Failure during rpc call to node %s" % node)
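
  # Hedged usage sketch (not in the original module): assuming the matching
  # opcode class is opcodes.OpTestDelay, carrying the fields listed in
  # _OP_REQP above, this LU could be exercised with something like:
  #   op = opcodes.OpTestDelay(duration=10.0, on_master=True, on_nodes=[])
  # submitted through the usual job queue; ExpandNames locks the target
  # nodes (if any) before Exec performs the sleep.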


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the mode-specific _*_KEYS class
      attributes are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, result) for
      easy usage

  """
  # pylint: disable-msg=R0902
  # lots of instance attributes
  _ALLO_KEYS = [
    "name", "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "name", "relocate_from",
    ]
  _EVAC_KEYS = [
    "evac_nodes",
    ]

  def __init__(self, cfg, rpc, mode, **kwargs):
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    self.name = None
    self.evac_nodes = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.result = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
      fn = self._AddNewInstance
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
      fn = self._AddRelocateInstance
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      keyset = self._EVAC_KEYS
      fn = self._AddEvacuateNodes
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])

    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData(fn)
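
  # Minimal construction sketch (illustrative only; the instance name, NIC,
  # disk and OS values below are hypothetical): a logical unit would
  # typically build and run an allocation request roughly as
  #   ial = IAllocator(self.cfg, self.rpc,
  #                    mode=constants.IALLOCATOR_MODE_ALLOC,
  #                    name="inst1.example.com",
  #                    mem_size=512,
  #                    disks=[{"size": 1024, "mode": "w"}],
  #                    disk_template=constants.DT_DRBD8,
  #                    os="debian-image",
  #                    tags=[],
  #                    nics=[{"mac": "aa:00:00:35:6e:01", "ip": None,
  #                           "bridge": None}],
  #                    vcpus=1,
  #                    hypervisor=constants.HT_XEN_PVM)
  #   ial.Run(allocator_name)
  # (allocator_name being the configured iallocator script) and then check
  # ial.success, ial.info and ial.result; every member of the mode's keyset
  # must be passed, as enforced above.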

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
      hypervisor_name = cluster_info.enabled_hypervisors[0]

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = objects.FillDict(
            cluster_info.nicparams[constants.PP_DEFAULT],
            nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data
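    # For orientation (illustrative sketch, not original code): at this point
    # self.in_data has roughly the shape
    #   {"version": ..., "cluster_name": ..., "cluster_tags": [...],
    #    "enabled_hypervisors": [...],
    #    "nodes": {<node name>: {"tags": [...], "total_memory": ..., ...}},
    #    "instances": {<instance name>: {"tags": [...], "memory": ..., ...}}}
    # the "request" key is merged in later by _BuildInputData.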

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    return request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                 errors.ECODE_STATE)

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    return request

  def _AddEvacuateNodes(self):
    """Add evacuate nodes data to allocator structure.

    """
    request = {
      "evac_nodes": self.evac_nodes
      }
    return request

  def _BuildInputData(self, fn):
    """Build input data structures.

    """
    self._ComputeClusterData()

    request = fn()
    request["type"] = self.mode
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)
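    # Note (illustrative, not original code): self.in_text is now the
    # serialized JSON document handed to the iallocator script; it contains
    # the cluster data computed above plus a "request" dict whose "type"
    # field is the mode (one of the constants.IALLOCATOR_MODE_* values).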

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other result attributes.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["result"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
                               " is not a list")
    self.out_data = rdict
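    # For reference (illustrative values, not original code): a well-formed
    # allocation reply parses into something like
    #   {"success": True,
    #    "info": "allocation successful",
    #    "result": ["node1.example.com", "node2.example.com"]}
    # which the loop above copies onto self.success, self.info, self.result.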


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the test direction and
    mode.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr, errors.ECODE_INVAL)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname, errors.ECODE_EXISTS)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'",
                                   errors.ECODE_INVAL)
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the 'nics'"
                                     " parameter", errors.ECODE_INVAL)
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'",
                                   errors.ECODE_INVAL)
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                     " parameter", errors.ECODE_INVAL)
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input",
                                   errors.ECODE_INVAL)
      fname = _ExpandInstanceName(self.cfg, self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      if not hasattr(self.op, "evac_nodes"):
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
                                   " opcode input", errors.ECODE_INVAL)
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode, errors.ECODE_INVAL)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name",
                                   errors.ECODE_INVAL)
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction, errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       evac_nodes=self.op.evac_nodes)
    else:
      raise errors.ProgrammerError("Unhandled mode %s in"
                                   " LUTestAllocator.Exec" % self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result