
root / lib / cmdlib.py @ 9f3ac970


1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 c70d2d9b Iustin Pop
# pylint: disable-msg=W0201
25 c70d2d9b Iustin Pop
26 c70d2d9b Iustin Pop
# W0201 since most LU attributes are defined in CheckPrereq or similar
27 c70d2d9b Iustin Pop
# functions
28 a8083063 Iustin Pop
29 a8083063 Iustin Pop
import os
30 a8083063 Iustin Pop
import os.path
31 a8083063 Iustin Pop
import time
32 a8083063 Iustin Pop
import re
33 a8083063 Iustin Pop
import platform
34 ffa1c0dc Iustin Pop
import logging
35 74409b12 Iustin Pop
import copy
36 b98bf262 Michael Hanselmann
import OpenSSL
37 a8083063 Iustin Pop
38 a8083063 Iustin Pop
from ganeti import ssh
39 a8083063 Iustin Pop
from ganeti import utils
40 a8083063 Iustin Pop
from ganeti import errors
41 a8083063 Iustin Pop
from ganeti import hypervisor
42 6048c986 Guido Trotter
from ganeti import locking
43 a8083063 Iustin Pop
from ganeti import constants
44 a8083063 Iustin Pop
from ganeti import objects
45 8d14b30d Iustin Pop
from ganeti import serializer
46 112f18a5 Iustin Pop
from ganeti import ssconf
47 1338f2b4 Balazs Lecz
from ganeti import uidpool
48 d61df03e Iustin Pop
49 d61df03e Iustin Pop
50 a8083063 Iustin Pop
class LogicalUnit(object):
51 396e1b78 Michael Hanselmann
  """Logical Unit base class.
52 a8083063 Iustin Pop

53 a8083063 Iustin Pop
  Subclasses must follow these rules:
54 d465bdc8 Guido Trotter
    - implement ExpandNames
55 6fd35c4d Michael Hanselmann
    - implement CheckPrereq (except when tasklets are used)
56 6fd35c4d Michael Hanselmann
    - implement Exec (except when tasklets are used)
57 a8083063 Iustin Pop
    - implement BuildHooksEnv
58 a8083063 Iustin Pop
    - redefine HPATH and HTYPE
59 05f86716 Guido Trotter
    - optionally redefine their run requirements:
60 7e55040e Guido Trotter
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively
61 05f86716 Guido Trotter

62 05f86716 Guido Trotter
  Note that all commands require root permissions.
63 a8083063 Iustin Pop

64 20777413 Iustin Pop
  @ivar dry_run_result: the value (if any) that will be returned to the caller
65 20777413 Iustin Pop
      in dry-run mode (signalled by opcode dry_run parameter)
66 20777413 Iustin Pop

67 a8083063 Iustin Pop
  """
68 a8083063 Iustin Pop
  HPATH = None
69 a8083063 Iustin Pop
  HTYPE = None
70 a8083063 Iustin Pop
  _OP_REQP = []
71 7e55040e Guido Trotter
  REQ_BGL = True
72 a8083063 Iustin Pop
73 72737a7f Iustin Pop
  def __init__(self, processor, op, context, rpc):
74 a8083063 Iustin Pop
    """Constructor for LogicalUnit.
75 a8083063 Iustin Pop

76 5bbd3f7f Michael Hanselmann
    This needs to be overridden in derived classes in order to check op
77 a8083063 Iustin Pop
    validity.
78 a8083063 Iustin Pop

79 a8083063 Iustin Pop
    """
80 5bfac263 Iustin Pop
    self.proc = processor
81 a8083063 Iustin Pop
    self.op = op
82 77b657a3 Guido Trotter
    self.cfg = context.cfg
83 77b657a3 Guido Trotter
    self.context = context
84 72737a7f Iustin Pop
    self.rpc = rpc
85 ca2a79e1 Guido Trotter
    # Dicts used to declare locking needs to mcpu
86 d465bdc8 Guido Trotter
    self.needed_locks = None
87 6683bba2 Guido Trotter
    self.acquired_locks = {}
88 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
89 ca2a79e1 Guido Trotter
    self.add_locks = {}
90 ca2a79e1 Guido Trotter
    self.remove_locks = {}
91 c4a2fee1 Guido Trotter
    # Used to force good behavior when calling helper functions
92 c4a2fee1 Guido Trotter
    self.recalculate_locks = {}
93 c92b310a Michael Hanselmann
    self.__ssh = None
94 86d9d3bb Iustin Pop
    # logging
95 fe267188 Iustin Pop
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
96 fe267188 Iustin Pop
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
97 d984846d Iustin Pop
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
98 20777413 Iustin Pop
    # support for dry-run
99 20777413 Iustin Pop
    self.dry_run_result = None
100 ee844e20 Iustin Pop
    # support for generic debug attribute
101 ee844e20 Iustin Pop
    if (not hasattr(self.op, "debug_level") or
102 ee844e20 Iustin Pop
        not isinstance(self.op.debug_level, int)):
103 ee844e20 Iustin Pop
      self.op.debug_level = 0
104 c92b310a Michael Hanselmann
105 6fd35c4d Michael Hanselmann
    # Tasklets
106 3a012b41 Michael Hanselmann
    self.tasklets = None
107 6fd35c4d Michael Hanselmann
108 a8083063 Iustin Pop
    for attr_name in self._OP_REQP:
109 a8083063 Iustin Pop
      attr_val = getattr(op, attr_name, None)
110 a8083063 Iustin Pop
      if attr_val is None:
111 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Required parameter '%s' missing" %
112 5c983ee5 Iustin Pop
                                   attr_name, errors.ECODE_INVAL)
113 6fd35c4d Michael Hanselmann
114 4be4691d Iustin Pop
    self.CheckArguments()
115 a8083063 Iustin Pop
116 c92b310a Michael Hanselmann
  def __GetSSH(self):
117 c92b310a Michael Hanselmann
    """Returns the SshRunner object
118 c92b310a Michael Hanselmann

119 c92b310a Michael Hanselmann
    """
120 c92b310a Michael Hanselmann
    if not self.__ssh:
121 6b0469d2 Iustin Pop
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
122 c92b310a Michael Hanselmann
    return self.__ssh
123 c92b310a Michael Hanselmann
124 c92b310a Michael Hanselmann
  ssh = property(fget=__GetSSH)
125 c92b310a Michael Hanselmann
126 4be4691d Iustin Pop
  def CheckArguments(self):
127 4be4691d Iustin Pop
    """Check syntactic validity for the opcode arguments.
128 4be4691d Iustin Pop

129 4be4691d Iustin Pop
    This method is for doing a simple syntactic check and ensuring
130 4be4691d Iustin Pop
    validity of opcode parameters, without any cluster-related
131 4be4691d Iustin Pop
    checks. While the same can be accomplished in ExpandNames and/or
132 4be4691d Iustin Pop
    CheckPrereq, doing these separately is better because:
133 4be4691d Iustin Pop

134 4be4691d Iustin Pop
      - ExpandNames is left as purely a lock-related function
135 5bbd3f7f Michael Hanselmann
      - CheckPrereq is run after we have acquired locks (and possibly
136 4be4691d Iustin Pop
        waited for them)
137 4be4691d Iustin Pop

138 4be4691d Iustin Pop
    The function is allowed to change the self.op attribute so that
139 4be4691d Iustin Pop
    later methods no longer need to worry about missing parameters.
140 4be4691d Iustin Pop

141 4be4691d Iustin Pop
    """
142 4be4691d Iustin Pop
    pass
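  # A sketch of a typical override; the optional "force" opcode parameter is
  # hypothetical and only illustrates the kind of normalization done here:
  #
  #   def CheckArguments(self):
  #     if not hasattr(self.op, "force"):
  #       self.op.force = False
  #     if not isinstance(self.op.force, bool):
  #       raise errors.OpPrereqError("Parameter 'force' must be a boolean",
  #                                  errors.ECODE_INVAL)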
143 4be4691d Iustin Pop
144 d465bdc8 Guido Trotter
  def ExpandNames(self):
145 d465bdc8 Guido Trotter
    """Expand names for this LU.
146 d465bdc8 Guido Trotter

147 d465bdc8 Guido Trotter
    This method is called before starting to execute the opcode, and it should
148 d465bdc8 Guido Trotter
    update all the parameters of the opcode to their canonical form (e.g. a
149 d465bdc8 Guido Trotter
    short node name must be fully expanded after this method has successfully
150 d465bdc8 Guido Trotter
    completed). This way locking, hooks, logging, etc. can work correctly.
151 d465bdc8 Guido Trotter

152 d465bdc8 Guido Trotter
    LUs which implement this method must also populate the self.needed_locks
153 d465bdc8 Guido Trotter
    member, as a dict with lock levels as keys, and a list of needed lock names
154 d465bdc8 Guido Trotter
    as values. Rules:
155 e4376078 Iustin Pop

156 e4376078 Iustin Pop
      - use an empty dict if you don't need any lock
157 e4376078 Iustin Pop
      - if you don't need any lock at a particular level omit that level
158 e4376078 Iustin Pop
      - don't put anything for the BGL level
159 e4376078 Iustin Pop
      - if you want all locks at a level use locking.ALL_SET as a value
160 d465bdc8 Guido Trotter

161 3977a4c1 Guido Trotter
    If you need to share locks (rather than acquire them exclusively) at one
162 3977a4c1 Guido Trotter
    level you can modify self.share_locks, setting a true value (usually 1) for
163 3977a4c1 Guido Trotter
    that level. By default locks are not shared.
164 3977a4c1 Guido Trotter

165 6fd35c4d Michael Hanselmann
    This function can also define a list of tasklets, which then will be
166 6fd35c4d Michael Hanselmann
    executed in order instead of the usual LU-level CheckPrereq and Exec
167 6fd35c4d Michael Hanselmann
    functions, if those are not defined by the LU.
168 6fd35c4d Michael Hanselmann

169 e4376078 Iustin Pop
    Examples::
170 e4376078 Iustin Pop

171 e4376078 Iustin Pop
      # Acquire all nodes and one instance
172 e4376078 Iustin Pop
      self.needed_locks = {
173 e4376078 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
174 e4376078 Iustin Pop
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
175 e4376078 Iustin Pop
      }
176 e4376078 Iustin Pop
      # Acquire just two nodes
177 e4376078 Iustin Pop
      self.needed_locks = {
178 e4376078 Iustin Pop
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
179 e4376078 Iustin Pop
      }
180 e4376078 Iustin Pop
      # Acquire no locks
181 e4376078 Iustin Pop
      self.needed_locks = {} # No, you can't leave it to the default value None
182 d465bdc8 Guido Trotter

183 d465bdc8 Guido Trotter
    """
184 d465bdc8 Guido Trotter
    # The implementation of this method is mandatory only if the new LU is
185 d465bdc8 Guido Trotter
    # concurrent, so that old LUs don't need to be changed all at the same
186 d465bdc8 Guido Trotter
    # time.
187 d465bdc8 Guido Trotter
    if self.REQ_BGL:
188 d465bdc8 Guido Trotter
      self.needed_locks = {} # Exclusive LUs don't need locks.
189 d465bdc8 Guido Trotter
    else:
190 d465bdc8 Guido Trotter
      raise NotImplementedError
191 d465bdc8 Guido Trotter
192 fb8dcb62 Guido Trotter
  def DeclareLocks(self, level):
193 fb8dcb62 Guido Trotter
    """Declare LU locking needs for a level
194 fb8dcb62 Guido Trotter

195 fb8dcb62 Guido Trotter
    While most LUs can just declare their locking needs at ExpandNames time,
196 fb8dcb62 Guido Trotter
    sometimes there's the need to calculate some locks after having acquired
197 fb8dcb62 Guido Trotter
    the ones before. This function is called just before acquiring locks at a
198 fb8dcb62 Guido Trotter
    particular level, but after acquiring the ones at lower levels, and permits
199 fb8dcb62 Guido Trotter
    such calculations. It can be used to modify self.needed_locks, and by
200 fb8dcb62 Guido Trotter
    default it does nothing.
201 fb8dcb62 Guido Trotter

202 fb8dcb62 Guido Trotter
    This function is only called if you have something already set in
203 fb8dcb62 Guido Trotter
    self.needed_locks for the level.
204 fb8dcb62 Guido Trotter

205 fb8dcb62 Guido Trotter
    @param level: Locking level which is going to be locked
206 fb8dcb62 Guido Trotter
    @type level: member of ganeti.locking.LEVELS
207 fb8dcb62 Guido Trotter

208 fb8dcb62 Guido Trotter
    """
209 fb8dcb62 Guido Trotter
210 a8083063 Iustin Pop
  def CheckPrereq(self):
211 a8083063 Iustin Pop
    """Check prerequisites for this LU.
212 a8083063 Iustin Pop

213 a8083063 Iustin Pop
    This method should check that the prerequisites for the execution
214 a8083063 Iustin Pop
    of this LU are fulfilled. It can do internode communication, but
215 a8083063 Iustin Pop
    it should be idempotent - no cluster or system changes are
216 a8083063 Iustin Pop
    allowed.
217 a8083063 Iustin Pop

218 a8083063 Iustin Pop
    The method should raise errors.OpPrereqError in case something is
219 a8083063 Iustin Pop
    not fulfilled. Its return value is ignored.
220 a8083063 Iustin Pop

221 a8083063 Iustin Pop
    This method should also update all the parameters of the opcode to
222 d465bdc8 Guido Trotter
    their canonical form if it hasn't been done by ExpandNames before.
223 a8083063 Iustin Pop

224 a8083063 Iustin Pop
    """
225 3a012b41 Michael Hanselmann
    if self.tasklets is not None:
226 b4a9eb66 Michael Hanselmann
      for (idx, tl) in enumerate(self.tasklets):
227 abae1b2b Michael Hanselmann
        logging.debug("Checking prerequisites for tasklet %s/%s",
228 abae1b2b Michael Hanselmann
                      idx + 1, len(self.tasklets))
229 6fd35c4d Michael Hanselmann
        tl.CheckPrereq()
230 6fd35c4d Michael Hanselmann
    else:
231 6fd35c4d Michael Hanselmann
      raise NotImplementedError
232 a8083063 Iustin Pop
233 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
234 a8083063 Iustin Pop
    """Execute the LU.
235 a8083063 Iustin Pop

236 a8083063 Iustin Pop
    This method should implement the actual work. It should raise
237 a8083063 Iustin Pop
    errors.OpExecError for failures that are somewhat dealt with in
238 a8083063 Iustin Pop
    code, or expected.
239 a8083063 Iustin Pop

240 a8083063 Iustin Pop
    """
241 3a012b41 Michael Hanselmann
    if self.tasklets is not None:
242 b4a9eb66 Michael Hanselmann
      for (idx, tl) in enumerate(self.tasklets):
243 abae1b2b Michael Hanselmann
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
244 6fd35c4d Michael Hanselmann
        tl.Exec(feedback_fn)
245 6fd35c4d Michael Hanselmann
    else:
246 6fd35c4d Michael Hanselmann
      raise NotImplementedError
247 a8083063 Iustin Pop
248 a8083063 Iustin Pop
  def BuildHooksEnv(self):
249 a8083063 Iustin Pop
    """Build hooks environment for this LU.
250 a8083063 Iustin Pop

251 a8083063 Iustin Pop
    This method should return a three-node tuple consisting of: a dict
252 a8083063 Iustin Pop
    containing the environment that will be used for running the
253 a8083063 Iustin Pop
    specific hook for this LU, a list of node names on which the hook
254 a8083063 Iustin Pop
    should run before the execution, and a list of node names on which
255 a8083063 Iustin Pop
    the hook should run after the execution.
256 a8083063 Iustin Pop

257 a8083063 Iustin Pop
    The keys of the dict must not have 'GANETI_' prefixed as this will
258 a8083063 Iustin Pop
    be handled in the hooks runner. Also note additional keys will be
259 a8083063 Iustin Pop
    added by the hooks runner. If the LU doesn't define any
260 a8083063 Iustin Pop
    environment, an empty dict (and not None) should be returned.
261 a8083063 Iustin Pop

262 8a3fe350 Guido Trotter
    No nodes should be returned as an empty list (and not None).
263 a8083063 Iustin Pop

264 a8083063 Iustin Pop
    Note that if the HPATH for a LU class is None, this function will
265 a8083063 Iustin Pop
    not be called.
266 a8083063 Iustin Pop

267 a8083063 Iustin Pop
    """
268 a8083063 Iustin Pop
    raise NotImplementedError
269 a8083063 Iustin Pop
270 1fce5219 Guido Trotter
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
271 1fce5219 Guido Trotter
    """Notify the LU about the results of its hooks.
272 1fce5219 Guido Trotter

273 1fce5219 Guido Trotter
    This method is called every time a hooks phase is executed, and notifies
274 1fce5219 Guido Trotter
    the Logical Unit about the hooks' result. The LU can then use it to alter
275 1fce5219 Guido Trotter
    its result based on the hooks.  By default the method does nothing and the
276 1fce5219 Guido Trotter
    previous result is passed back unchanged, but any LU can override it if it
277 1fce5219 Guido Trotter
    wants to use the local cluster hook-scripts somehow.
278 1fce5219 Guido Trotter

279 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
280 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
281 e4376078 Iustin Pop
    @param hook_results: the results of the multi-node hooks rpc call
282 e4376078 Iustin Pop
    @param feedback_fn: function used to send feedback back to the caller
283 e4376078 Iustin Pop
    @param lu_result: the previous Exec result this LU had, or None
284 e4376078 Iustin Pop
        in the PRE phase
285 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
286 e4376078 Iustin Pop
        and hook results
287 1fce5219 Guido Trotter

288 1fce5219 Guido Trotter
    """
289 2d54e29c Iustin Pop
    # API must be kept, thus we ignore the unused argument and the
290 2d54e29c Iustin Pop
    # 'could be a function' warnings
291 2d54e29c Iustin Pop
    # pylint: disable-msg=W0613,R0201
292 1fce5219 Guido Trotter
    return lu_result
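  # A sketch of a possible override; the post-phase handling is hypothetical
  # and only illustrates the call contract described above:
  #
  #   def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
  #     if phase == constants.HOOKS_PHASE_POST:
  #       for node_name, node_result in hook_results.items():
  #         feedback_fn("Post hook on %s returned: %s" %
  #                     (node_name, node_result))
  #     return lu_result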
293 1fce5219 Guido Trotter
294 43905206 Guido Trotter
  def _ExpandAndLockInstance(self):
295 43905206 Guido Trotter
    """Helper function to expand and lock an instance.
296 43905206 Guido Trotter

297 43905206 Guido Trotter
    Many LUs that work on an instance take its name in self.op.instance_name
298 43905206 Guido Trotter
    and need to expand it and then declare the expanded name for locking. This
299 43905206 Guido Trotter
    function does it, and then updates self.op.instance_name to the expanded
300 43905206 Guido Trotter
    name. It also initializes needed_locks as a dict, if this hasn't been done
301 43905206 Guido Trotter
    before.
302 43905206 Guido Trotter

303 43905206 Guido Trotter
    """
304 43905206 Guido Trotter
    if self.needed_locks is None:
305 43905206 Guido Trotter
      self.needed_locks = {}
306 43905206 Guido Trotter
    else:
307 43905206 Guido Trotter
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
308 43905206 Guido Trotter
        "_ExpandAndLockInstance called with instance-level locks set"
309 cf26a87a Iustin Pop
    self.op.instance_name = _ExpandInstanceName(self.cfg,
310 cf26a87a Iustin Pop
                                                self.op.instance_name)
311 cf26a87a Iustin Pop
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
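  # Typical use from an instance-level LU (sketch; LUExampleStartup and its
  # opcode are hypothetical):
  #
  #   class LUExampleStartup(LogicalUnit):
  #     _OP_REQP = ["instance_name"]
  #     REQ_BGL = False
  #
  #     def ExpandNames(self):
  #       self._ExpandAndLockInstance()
  #       self.needed_locks[locking.LEVEL_NODE] = []
  #       self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE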
312 43905206 Guido Trotter
313 a82ce292 Guido Trotter
  def _LockInstancesNodes(self, primary_only=False):
314 c4a2fee1 Guido Trotter
    """Helper function to declare instances' nodes for locking.
315 c4a2fee1 Guido Trotter

316 c4a2fee1 Guido Trotter
    This function should be called after locking one or more instances to lock
317 c4a2fee1 Guido Trotter
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
318 c4a2fee1 Guido Trotter
    with all primary or secondary nodes for instances already locked and
319 c4a2fee1 Guido Trotter
    present in self.needed_locks[locking.LEVEL_INSTANCE].
320 c4a2fee1 Guido Trotter

321 c4a2fee1 Guido Trotter
    It should be called from DeclareLocks, and for safety only works if
322 c4a2fee1 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] is set.
323 c4a2fee1 Guido Trotter

324 c4a2fee1 Guido Trotter
    In the future it may grow parameters to just lock some instances' nodes, or
325 c4a2fee1 Guido Trotter
    to lock only primary or only secondary nodes, if needed.
326 c4a2fee1 Guido Trotter

327 e4376078 Iustin Pop
    It should be called in DeclareLocks in a way similar to::
328 c4a2fee1 Guido Trotter

329 e4376078 Iustin Pop
      if level == locking.LEVEL_NODE:
330 e4376078 Iustin Pop
        self._LockInstancesNodes()
331 c4a2fee1 Guido Trotter

332 a82ce292 Guido Trotter
    @type primary_only: boolean
333 a82ce292 Guido Trotter
    @param primary_only: only lock primary nodes of locked instances
334 a82ce292 Guido Trotter

335 c4a2fee1 Guido Trotter
    """
336 c4a2fee1 Guido Trotter
    assert locking.LEVEL_NODE in self.recalculate_locks, \
337 c4a2fee1 Guido Trotter
      "_LockInstancesNodes helper function called with no nodes to recalculate"
338 c4a2fee1 Guido Trotter
339 c4a2fee1 Guido Trotter
    # TODO: check if we've really been called with the instance locks held
340 c4a2fee1 Guido Trotter
341 c4a2fee1 Guido Trotter
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
342 c4a2fee1 Guido Trotter
    # future we might want to have different behaviors depending on the value
343 c4a2fee1 Guido Trotter
    # of self.recalculate_locks[locking.LEVEL_NODE]
344 c4a2fee1 Guido Trotter
    wanted_nodes = []
345 6683bba2 Guido Trotter
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
346 c4a2fee1 Guido Trotter
      instance = self.context.cfg.GetInstanceInfo(instance_name)
347 c4a2fee1 Guido Trotter
      wanted_nodes.append(instance.primary_node)
348 a82ce292 Guido Trotter
      if not primary_only:
349 a82ce292 Guido Trotter
        wanted_nodes.extend(instance.secondary_nodes)
350 9513b6ab Guido Trotter
351 9513b6ab Guido Trotter
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
352 9513b6ab Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
353 9513b6ab Guido Trotter
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
354 9513b6ab Guido Trotter
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
355 c4a2fee1 Guido Trotter
356 c4a2fee1 Guido Trotter
    del self.recalculate_locks[locking.LEVEL_NODE]
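  # The matching DeclareLocks for the ExpandNames pattern sketched further
  # above (again hypothetical, shown only to complete the two-phase example):
  #
  #   def DeclareLocks(self, level):
  #     if level == locking.LEVEL_NODE:
  #       self._LockInstancesNodes(primary_only=True)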
357 c4a2fee1 Guido Trotter
358 a8083063 Iustin Pop
359 fe267188 Iustin Pop
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
360 a8083063 Iustin Pop
  """Simple LU which runs no hooks.
361 a8083063 Iustin Pop

362 a8083063 Iustin Pop
  This LU is intended as a parent for other LogicalUnits which will
363 a8083063 Iustin Pop
  run no hooks, in order to reduce duplicate code.
364 a8083063 Iustin Pop

365 a8083063 Iustin Pop
  """
366 a8083063 Iustin Pop
  HPATH = None
367 a8083063 Iustin Pop
  HTYPE = None
368 a8083063 Iustin Pop
369 fc8a6b8f Iustin Pop
  def BuildHooksEnv(self):
370 fc8a6b8f Iustin Pop
    """Empty BuildHooksEnv for NoHooksLu.
371 fc8a6b8f Iustin Pop

372 fc8a6b8f Iustin Pop
    This just raises an error.
373 fc8a6b8f Iustin Pop

374 fc8a6b8f Iustin Pop
    """
375 fc8a6b8f Iustin Pop
    assert False, "BuildHooksEnv called for NoHooksLUs"
376 fc8a6b8f Iustin Pop
377 a8083063 Iustin Pop
378 9a6800e1 Michael Hanselmann
class Tasklet:
379 9a6800e1 Michael Hanselmann
  """Tasklet base class.
380 9a6800e1 Michael Hanselmann

381 9a6800e1 Michael Hanselmann
  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
382 9a6800e1 Michael Hanselmann
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
383 9a6800e1 Michael Hanselmann
  tasklets know nothing about locks.
384 9a6800e1 Michael Hanselmann

385 9a6800e1 Michael Hanselmann
  Subclasses must follow these rules:
386 9a6800e1 Michael Hanselmann
    - Implement CheckPrereq
387 9a6800e1 Michael Hanselmann
    - Implement Exec
388 9a6800e1 Michael Hanselmann

389 9a6800e1 Michael Hanselmann
  """
390 464243a7 Michael Hanselmann
  def __init__(self, lu):
391 464243a7 Michael Hanselmann
    self.lu = lu
392 464243a7 Michael Hanselmann
393 464243a7 Michael Hanselmann
    # Shortcuts
394 464243a7 Michael Hanselmann
    self.cfg = lu.cfg
395 464243a7 Michael Hanselmann
    self.rpc = lu.rpc
396 464243a7 Michael Hanselmann
397 9a6800e1 Michael Hanselmann
  def CheckPrereq(self):
398 9a6800e1 Michael Hanselmann
    """Check prerequisites for this tasklets.
399 9a6800e1 Michael Hanselmann

400 9a6800e1 Michael Hanselmann
    This method should check whether the prerequisites for the execution of
401 9a6800e1 Michael Hanselmann
    this tasklet are fulfilled. It can do internode communication, but it
402 9a6800e1 Michael Hanselmann
    should be idempotent - no cluster or system changes are allowed.
403 9a6800e1 Michael Hanselmann

404 9a6800e1 Michael Hanselmann
    The method should raise errors.OpPrereqError in case something is not
405 9a6800e1 Michael Hanselmann
    fulfilled. Its return value is ignored.
406 9a6800e1 Michael Hanselmann

407 9a6800e1 Michael Hanselmann
    This method should also update all parameters to their canonical form if it
408 9a6800e1 Michael Hanselmann
    hasn't been done before.
409 9a6800e1 Michael Hanselmann

410 9a6800e1 Michael Hanselmann
    """
411 9a6800e1 Michael Hanselmann
    raise NotImplementedError
412 9a6800e1 Michael Hanselmann
413 9a6800e1 Michael Hanselmann
  def Exec(self, feedback_fn):
414 9a6800e1 Michael Hanselmann
    """Execute the tasklet.
415 9a6800e1 Michael Hanselmann

416 9a6800e1 Michael Hanselmann
    This method should implement the actual work. It should raise
417 9a6800e1 Michael Hanselmann
    errors.OpExecError for failures that are somewhat dealt with in code, or
418 9a6800e1 Michael Hanselmann
    expected.
419 9a6800e1 Michael Hanselmann

420 9a6800e1 Michael Hanselmann
    """
421 9a6800e1 Michael Hanselmann
    raise NotImplementedError
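# How an LU typically wires tasklets in (sketch; the tasklet class and its
# fields are hypothetical):
#
#   class _ExampleTasklet(Tasklet):
#     def __init__(self, lu, instance_name):
#       Tasklet.__init__(self, lu)
#       self.instance_name = instance_name
#
#     def CheckPrereq(self):
#       self.instance = self.cfg.GetInstanceInfo(self.instance_name)
#
#     def Exec(self, feedback_fn):
#       feedback_fn("Processing %s" % self.instance.name)
#
#   # ... and in the owning LU's ExpandNames, after declaring locks:
#   #   self.tasklets = [_ExampleTasklet(self, name) for name in wanted_names]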
422 9a6800e1 Michael Hanselmann
423 9a6800e1 Michael Hanselmann
424 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
425 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded node names.
426 83120a01 Michael Hanselmann

427 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
428 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
429 e4376078 Iustin Pop
  @type nodes: list
430 e4376078 Iustin Pop
  @param nodes: list of node names or None for all nodes
431 e4376078 Iustin Pop
  @rtype: list
432 e4376078 Iustin Pop
  @return: the list of nodes, sorted
433 083a91c9 Iustin Pop
  @raise errors.ProgrammerError: if the nodes parameter is wrong type
434 83120a01 Michael Hanselmann

435 83120a01 Michael Hanselmann
  """
436 3312b702 Iustin Pop
  if not isinstance(nodes, list):
437 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
438 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
439 dcb93971 Michael Hanselmann
440 ea47808a Guido Trotter
  if not nodes:
441 ea47808a Guido Trotter
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
442 ea47808a Guido Trotter
      " non-empty list of nodes whose name is to be expanded.")
443 dcb93971 Michael Hanselmann
444 61dabca4 Iustin Pop
  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
445 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
446 3312b702 Iustin Pop
447 3312b702 Iustin Pop
448 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
449 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
450 3312b702 Iustin Pop

451 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
452 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
453 e4376078 Iustin Pop
  @type instances: list
454 e4376078 Iustin Pop
  @param instances: list of instance names or None for all instances
455 e4376078 Iustin Pop
  @rtype: list
456 e4376078 Iustin Pop
  @return: the list of instances, sorted
457 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if the instances parameter is wrong type
458 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if any of the passed instances is not found
459 3312b702 Iustin Pop

460 3312b702 Iustin Pop
  """
461 3312b702 Iustin Pop
  if not isinstance(instances, list):
462 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'",
463 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
464 3312b702 Iustin Pop
465 3312b702 Iustin Pop
  if instances:
466 cf26a87a Iustin Pop
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
467 3312b702 Iustin Pop
  else:
468 a7f5dc98 Iustin Pop
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
469 a7f5dc98 Iustin Pop
  return wanted
470 dcb93971 Michael Hanselmann
471 dcb93971 Michael Hanselmann
472 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
473 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
474 83120a01 Michael Hanselmann

475 a2d2e1a7 Iustin Pop
  @type static: L{utils.FieldSet}
476 31bf511f Iustin Pop
  @param static: static fields set
477 a2d2e1a7 Iustin Pop
  @type dynamic: L{utils.FieldSet}
478 31bf511f Iustin Pop
  @param dynamic: dynamic fields set
  @type selected: list
  @param selected: the list of fields selected by the user
479 83120a01 Michael Hanselmann

480 83120a01 Michael Hanselmann
  """
481 a2d2e1a7 Iustin Pop
  f = utils.FieldSet()
482 31bf511f Iustin Pop
  f.Extend(static)
483 31bf511f Iustin Pop
  f.Extend(dynamic)
484 dcb93971 Michael Hanselmann
485 31bf511f Iustin Pop
  delta = f.NonMatching(selected)
486 31bf511f Iustin Pop
  if delta:
487 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
488 5c983ee5 Iustin Pop
                               % ",".join(delta), errors.ECODE_INVAL)
489 dcb93971 Michael Hanselmann
490 dcb93971 Michael Hanselmann
491 a5961235 Iustin Pop
def _CheckBooleanOpField(op, name):
492 a5961235 Iustin Pop
  """Validates boolean opcode parameters.
493 a5961235 Iustin Pop

494 a5961235 Iustin Pop
  This will ensure that an opcode parameter is either a boolean value,
495 a5961235 Iustin Pop
  or None (but that it always exists).
496 a5961235 Iustin Pop

497 a5961235 Iustin Pop
  """
498 a5961235 Iustin Pop
  val = getattr(op, name, None)
499 a5961235 Iustin Pop
  if not (val is None or isinstance(val, bool)):
500 a5961235 Iustin Pop
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
501 5c983ee5 Iustin Pop
                               (name, str(val)), errors.ECODE_INVAL)
502 a5961235 Iustin Pop
  setattr(op, name, val)
503 a5961235 Iustin Pop
504 a5961235 Iustin Pop
505 7736a5f2 Iustin Pop
def _CheckGlobalHvParams(params):
506 7736a5f2 Iustin Pop
  """Validates that given hypervisor params are not global ones.
507 7736a5f2 Iustin Pop

508 7736a5f2 Iustin Pop
  This will ensure that instances don't get customised versions of
509 7736a5f2 Iustin Pop
  global params.
510 7736a5f2 Iustin Pop

511 7736a5f2 Iustin Pop
  """
512 7736a5f2 Iustin Pop
  used_globals = constants.HVC_GLOBALS.intersection(params)
513 7736a5f2 Iustin Pop
  if used_globals:
514 7736a5f2 Iustin Pop
    msg = ("The following hypervisor parameters are global and cannot"
515 7736a5f2 Iustin Pop
           " be customized at instance level, please modify them at"
516 1f864b60 Iustin Pop
           " cluster level: %s" % utils.CommaJoin(used_globals))
517 7736a5f2 Iustin Pop
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
518 7736a5f2 Iustin Pop
519 7736a5f2 Iustin Pop
520 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
521 a5961235 Iustin Pop
  """Ensure that a given node is online.
522 a5961235 Iustin Pop

523 a5961235 Iustin Pop
  @param lu: the LU on behalf of which we make the check
524 a5961235 Iustin Pop
  @param node: the node to check
525 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is offline
526 a5961235 Iustin Pop

527 a5961235 Iustin Pop
  """
528 a5961235 Iustin Pop
  if lu.cfg.GetNodeInfo(node).offline:
529 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Can't use offline node %s" % node,
530 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
531 a5961235 Iustin Pop
532 a5961235 Iustin Pop
533 733a2b6a Iustin Pop
def _CheckNodeNotDrained(lu, node):
534 733a2b6a Iustin Pop
  """Ensure that a given node is not drained.
535 733a2b6a Iustin Pop

536 733a2b6a Iustin Pop
  @param lu: the LU on behalf of which we make the check
537 733a2b6a Iustin Pop
  @param node: the node to check
538 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is drained
539 733a2b6a Iustin Pop

540 733a2b6a Iustin Pop
  """
541 733a2b6a Iustin Pop
  if lu.cfg.GetNodeInfo(node).drained:
542 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Can't use drained node %s" % node,
543 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
544 733a2b6a Iustin Pop
545 733a2b6a Iustin Pop
546 231cd901 Iustin Pop
def _CheckNodeHasOS(lu, node, os_name, force_variant):
547 231cd901 Iustin Pop
  """Ensure that a node supports a given OS.
548 231cd901 Iustin Pop

549 231cd901 Iustin Pop
  @param lu: the LU on behalf of which we make the check
550 231cd901 Iustin Pop
  @param node: the node to check
551 231cd901 Iustin Pop
  @param os_name: the OS to query about
552 231cd901 Iustin Pop
  @param force_variant: whether to ignore variant errors
553 231cd901 Iustin Pop
  @raise errors.OpPrereqError: if the node does not support the OS
554 231cd901 Iustin Pop

555 231cd901 Iustin Pop
  """
556 231cd901 Iustin Pop
  result = lu.rpc.call_os_get(node, os_name)
557 231cd901 Iustin Pop
  result.Raise("OS '%s' not in supported OS list for node %s" %
558 231cd901 Iustin Pop
               (os_name, node),
559 231cd901 Iustin Pop
               prereq=True, ecode=errors.ECODE_INVAL)
560 231cd901 Iustin Pop
  if not force_variant:
561 231cd901 Iustin Pop
    _CheckOSVariant(result.payload, os_name)
562 231cd901 Iustin Pop
563 231cd901 Iustin Pop
564 0e3baaf3 Iustin Pop
def _RequireFileStorage():
565 0e3baaf3 Iustin Pop
  """Checks that file storage is enabled.
566 0e3baaf3 Iustin Pop

567 0e3baaf3 Iustin Pop
  @raise errors.OpPrereqError: when file storage is disabled
568 0e3baaf3 Iustin Pop

569 0e3baaf3 Iustin Pop
  """
570 0e3baaf3 Iustin Pop
  if not constants.ENABLE_FILE_STORAGE:
571 0e3baaf3 Iustin Pop
    raise errors.OpPrereqError("File storage disabled at configure time",
572 0e3baaf3 Iustin Pop
                               errors.ECODE_INVAL)
573 0e3baaf3 Iustin Pop
574 0e3baaf3 Iustin Pop
575 5d55819e Iustin Pop
def _CheckDiskTemplate(template):
576 5d55819e Iustin Pop
  """Ensure a given disk template is valid.
577 5d55819e Iustin Pop

578 5d55819e Iustin Pop
  """
579 5d55819e Iustin Pop
  if template not in constants.DISK_TEMPLATES:
580 5d55819e Iustin Pop
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
581 5d55819e Iustin Pop
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
582 5d55819e Iustin Pop
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
583 0e3baaf3 Iustin Pop
  if template == constants.DT_FILE:
584 0e3baaf3 Iustin Pop
    _RequireFileStorage()
585 0e3baaf3 Iustin Pop
586 0e3baaf3 Iustin Pop
587 0e3baaf3 Iustin Pop
def _CheckStorageType(storage_type):
588 0e3baaf3 Iustin Pop
  """Ensure a given storage type is valid.
589 0e3baaf3 Iustin Pop

590 0e3baaf3 Iustin Pop
  """
591 0e3baaf3 Iustin Pop
  if storage_type not in constants.VALID_STORAGE_TYPES:
592 0e3baaf3 Iustin Pop
    raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
593 f276c4b5 Iustin Pop
                               errors.ECODE_INVAL)
594 0e3baaf3 Iustin Pop
  if storage_type == constants.ST_FILE:
595 0e3baaf3 Iustin Pop
    _RequireFileStorage()
596 0e3baaf3 Iustin Pop
597 5d55819e Iustin Pop
598 5d55819e Iustin Pop
599 31624382 Iustin Pop
def _CheckInstanceDown(lu, instance, reason):
600 31624382 Iustin Pop
  """Ensure that an instance is not running."""
601 31624382 Iustin Pop
  if instance.admin_up:
602 31624382 Iustin Pop
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
603 31624382 Iustin Pop
                               (instance.name, reason), errors.ECODE_STATE)
604 31624382 Iustin Pop
605 31624382 Iustin Pop
  pnode = instance.primary_node
606 31624382 Iustin Pop
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
607 31624382 Iustin Pop
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
608 31624382 Iustin Pop
              prereq=True, ecode=errors.ECODE_ENVIRON)
609 31624382 Iustin Pop
610 31624382 Iustin Pop
  if instance.name in ins_l.payload:
611 31624382 Iustin Pop
    raise errors.OpPrereqError("Instance %s is running, %s" %
612 31624382 Iustin Pop
                               (instance.name, reason), errors.ECODE_STATE)
613 31624382 Iustin Pop
614 31624382 Iustin Pop
615 cf26a87a Iustin Pop
def _ExpandItemName(fn, name, kind):
616 cf26a87a Iustin Pop
  """Expand an item name.
617 cf26a87a Iustin Pop

618 cf26a87a Iustin Pop
  @param fn: the function to use for expansion
619 cf26a87a Iustin Pop
  @param name: requested item name
620 cf26a87a Iustin Pop
  @param kind: text description ('Node' or 'Instance')
621 cf26a87a Iustin Pop
  @return: the resolved (full) name
622 cf26a87a Iustin Pop
  @raise errors.OpPrereqError: if the item is not found
623 cf26a87a Iustin Pop

624 cf26a87a Iustin Pop
  """
625 cf26a87a Iustin Pop
  full_name = fn(name)
626 cf26a87a Iustin Pop
  if full_name is None:
627 cf26a87a Iustin Pop
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
628 cf26a87a Iustin Pop
                               errors.ECODE_NOENT)
629 cf26a87a Iustin Pop
  return full_name
630 cf26a87a Iustin Pop
631 cf26a87a Iustin Pop
632 cf26a87a Iustin Pop
def _ExpandNodeName(cfg, name):
633 cf26a87a Iustin Pop
  """Wrapper over L{_ExpandItemName} for nodes."""
634 cf26a87a Iustin Pop
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
635 cf26a87a Iustin Pop
636 cf26a87a Iustin Pop
637 cf26a87a Iustin Pop
def _ExpandInstanceName(cfg, name):
638 cf26a87a Iustin Pop
  """Wrapper over L{_ExpandItemName} for instance."""
639 cf26a87a Iustin Pop
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
640 cf26a87a Iustin Pop
641 cf26a87a Iustin Pop
642 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
643 67fc3042 Iustin Pop
                          memory, vcpus, nics, disk_template, disks,
644 7c4d6c7b Michael Hanselmann
                          bep, hvp, hypervisor_name):
645 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
646 e4376078 Iustin Pop

647 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
648 e4376078 Iustin Pop

649 e4376078 Iustin Pop
  @type name: string
650 e4376078 Iustin Pop
  @param name: the name of the instance
651 e4376078 Iustin Pop
  @type primary_node: string
652 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
653 e4376078 Iustin Pop
  @type secondary_nodes: list
654 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
655 e4376078 Iustin Pop
  @type os_type: string
656 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
657 0d68c45d Iustin Pop
  @type status: boolean
658 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
659 e4376078 Iustin Pop
  @type memory: string
660 e4376078 Iustin Pop
  @param memory: the memory size of the instance
661 e4376078 Iustin Pop
  @type vcpus: string
662 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
663 e4376078 Iustin Pop
  @type nics: list
664 5e3d3eb3 Guido Trotter
  @param nics: list of tuples (ip, mac, mode, link) representing
665 5e3d3eb3 Guido Trotter
      the NICs the instance has
666 2c2690c9 Iustin Pop
  @type disk_template: string
667 5bbd3f7f Michael Hanselmann
  @param disk_template: the disk template of the instance
668 2c2690c9 Iustin Pop
  @type disks: list
669 2c2690c9 Iustin Pop
  @param disks: the list of (size, mode) pairs
670 67fc3042 Iustin Pop
  @type bep: dict
671 67fc3042 Iustin Pop
  @param bep: the backend parameters for the instance
672 67fc3042 Iustin Pop
  @type hvp: dict
673 67fc3042 Iustin Pop
  @param hvp: the hypervisor parameters for the instance
674 7c4d6c7b Michael Hanselmann
  @type hypervisor_name: string
675 7c4d6c7b Michael Hanselmann
  @param hypervisor_name: the hypervisor for the instance
676 e4376078 Iustin Pop
  @rtype: dict
677 e4376078 Iustin Pop
  @return: the hook environment for this instance
678 ecb215b5 Michael Hanselmann

679 396e1b78 Michael Hanselmann
  """
680 0d68c45d Iustin Pop
  if status:
681 0d68c45d Iustin Pop
    str_status = "up"
682 0d68c45d Iustin Pop
  else:
683 0d68c45d Iustin Pop
    str_status = "down"
684 396e1b78 Michael Hanselmann
  env = {
685 0e137c28 Iustin Pop
    "OP_TARGET": name,
686 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
687 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
688 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
689 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
690 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
691 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
692 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
693 2c2690c9 Iustin Pop
    "INSTANCE_DISK_TEMPLATE": disk_template,
694 7c4d6c7b Michael Hanselmann
    "INSTANCE_HYPERVISOR": hypervisor_name,
695 396e1b78 Michael Hanselmann
  }
696 396e1b78 Michael Hanselmann
697 396e1b78 Michael Hanselmann
  if nics:
698 396e1b78 Michael Hanselmann
    nic_count = len(nics)
699 62f0dd02 Guido Trotter
    for idx, (ip, mac, mode, link) in enumerate(nics):
700 396e1b78 Michael Hanselmann
      if ip is None:
701 396e1b78 Michael Hanselmann
        ip = ""
702 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
703 2c2690c9 Iustin Pop
      env["INSTANCE_NIC%d_MAC" % idx] = mac
704 62f0dd02 Guido Trotter
      env["INSTANCE_NIC%d_MODE" % idx] = mode
705 62f0dd02 Guido Trotter
      env["INSTANCE_NIC%d_LINK" % idx] = link
706 62f0dd02 Guido Trotter
      if mode == constants.NIC_MODE_BRIDGED:
707 62f0dd02 Guido Trotter
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
708 396e1b78 Michael Hanselmann
  else:
709 396e1b78 Michael Hanselmann
    nic_count = 0
710 396e1b78 Michael Hanselmann
711 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
712 396e1b78 Michael Hanselmann
713 2c2690c9 Iustin Pop
  if disks:
714 2c2690c9 Iustin Pop
    disk_count = len(disks)
715 2c2690c9 Iustin Pop
    for idx, (size, mode) in enumerate(disks):
716 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_SIZE" % idx] = size
717 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_MODE" % idx] = mode
718 2c2690c9 Iustin Pop
  else:
719 2c2690c9 Iustin Pop
    disk_count = 0
720 2c2690c9 Iustin Pop
721 2c2690c9 Iustin Pop
  env["INSTANCE_DISK_COUNT"] = disk_count
722 2c2690c9 Iustin Pop
723 67fc3042 Iustin Pop
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
724 67fc3042 Iustin Pop
    for key, value in source.items():
725 67fc3042 Iustin Pop
      env["INSTANCE_%s_%s" % (kind, key)] = value
726 67fc3042 Iustin Pop
727 396e1b78 Michael Hanselmann
  return env
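# For illustration, a single-NIC, single-disk instance as described above
# would yield variables roughly like the following (values are hypothetical;
# the hooks runner later prepends the GANETI_ prefix):
#
#   INSTANCE_NAME=instance1.example.com
#   INSTANCE_PRIMARY=node1.example.com
#   INSTANCE_NIC_COUNT=1
#   INSTANCE_NIC0_MODE=bridged
#   INSTANCE_NIC0_LINK=xen-br0
#   INSTANCE_NIC0_BRIDGE=xen-br0
#   INSTANCE_DISK_COUNT=1
#   INSTANCE_DISK0_SIZE=10240
#   INSTANCE_DISK0_MODE=rw
#   INSTANCE_BE_memory=512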
728 396e1b78 Michael Hanselmann
729 96acbc09 Michael Hanselmann
730 f9b10246 Guido Trotter
def _NICListToTuple(lu, nics):
731 62f0dd02 Guido Trotter
  """Build a list of nic information tuples.
732 62f0dd02 Guido Trotter

733 f9b10246 Guido Trotter
  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
734 f9b10246 Guido Trotter
  value in LUQueryInstanceData.
735 62f0dd02 Guido Trotter

736 62f0dd02 Guido Trotter
  @type lu:  L{LogicalUnit}
737 62f0dd02 Guido Trotter
  @param lu: the logical unit on whose behalf we execute
738 62f0dd02 Guido Trotter
  @type nics: list of L{objects.NIC}
739 62f0dd02 Guido Trotter
  @param nics: list of nics to convert to hooks tuples
740 62f0dd02 Guido Trotter

741 62f0dd02 Guido Trotter
  """
742 62f0dd02 Guido Trotter
  hooks_nics = []
743 62f0dd02 Guido Trotter
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
744 62f0dd02 Guido Trotter
  for nic in nics:
745 62f0dd02 Guido Trotter
    ip = nic.ip
746 62f0dd02 Guido Trotter
    mac = nic.mac
747 62f0dd02 Guido Trotter
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
748 62f0dd02 Guido Trotter
    mode = filled_params[constants.NIC_MODE]
749 62f0dd02 Guido Trotter
    link = filled_params[constants.NIC_LINK]
750 62f0dd02 Guido Trotter
    hooks_nics.append((ip, mac, mode, link))
751 62f0dd02 Guido Trotter
  return hooks_nics
752 396e1b78 Michael Hanselmann
753 96acbc09 Michael Hanselmann
754 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
755 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from an object.
756 ecb215b5 Michael Hanselmann

757 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
758 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
759 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
760 e4376078 Iustin Pop
  @param instance: the instance for which we should build the
761 e4376078 Iustin Pop
      environment
762 e4376078 Iustin Pop
  @type override: dict
763 e4376078 Iustin Pop
  @param override: dictionary with key/values that will override
764 e4376078 Iustin Pop
      our values
765 e4376078 Iustin Pop
  @rtype: dict
766 e4376078 Iustin Pop
  @return: the hook environment dictionary
767 e4376078 Iustin Pop

768 ecb215b5 Michael Hanselmann
  """
769 67fc3042 Iustin Pop
  cluster = lu.cfg.GetClusterInfo()
770 67fc3042 Iustin Pop
  bep = cluster.FillBE(instance)
771 67fc3042 Iustin Pop
  hvp = cluster.FillHV(instance)
772 396e1b78 Michael Hanselmann
  args = {
773 396e1b78 Michael Hanselmann
    'name': instance.name,
774 396e1b78 Michael Hanselmann
    'primary_node': instance.primary_node,
775 396e1b78 Michael Hanselmann
    'secondary_nodes': instance.secondary_nodes,
776 ecb215b5 Michael Hanselmann
    'os_type': instance.os,
777 0d68c45d Iustin Pop
    'status': instance.admin_up,
778 338e51e8 Iustin Pop
    'memory': bep[constants.BE_MEMORY],
779 338e51e8 Iustin Pop
    'vcpus': bep[constants.BE_VCPUS],
780 f9b10246 Guido Trotter
    'nics': _NICListToTuple(lu, instance.nics),
781 2c2690c9 Iustin Pop
    'disk_template': instance.disk_template,
782 2c2690c9 Iustin Pop
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
783 67fc3042 Iustin Pop
    'bep': bep,
784 67fc3042 Iustin Pop
    'hvp': hvp,
785 b0c63e2b Iustin Pop
    'hypervisor_name': instance.hypervisor,
786 396e1b78 Michael Hanselmann
  }
787 396e1b78 Michael Hanselmann
  if override:
788 396e1b78 Michael Hanselmann
    args.update(override)
789 7260cfbe Iustin Pop
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142
790 396e1b78 Michael Hanselmann
791 396e1b78 Michael Hanselmann
792 44485f49 Guido Trotter
def _AdjustCandidatePool(lu, exceptions):
793 ec0292f1 Iustin Pop
  """Adjust the candidate pool after node operations.
794 ec0292f1 Iustin Pop

795 ec0292f1 Iustin Pop
  """
796 44485f49 Guido Trotter
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
797 ec0292f1 Iustin Pop
  if mod_list:
798 ec0292f1 Iustin Pop
    lu.LogInfo("Promoted nodes to master candidate role: %s",
799 1f864b60 Iustin Pop
               utils.CommaJoin(node.name for node in mod_list))
800 ec0292f1 Iustin Pop
    for name in mod_list:
801 ec0292f1 Iustin Pop
      lu.context.ReaddNode(name)
802 44485f49 Guido Trotter
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
803 ec0292f1 Iustin Pop
  if mc_now > mc_max:
804 ec0292f1 Iustin Pop
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
805 ec0292f1 Iustin Pop
               (mc_now, mc_max))
806 ec0292f1 Iustin Pop
807 ec0292f1 Iustin Pop
808 6d7e1f20 Guido Trotter
def _DecideSelfPromotion(lu, exceptions=None):
809 6d7e1f20 Guido Trotter
  """Decide whether I should promote myself as a master candidate.
810 6d7e1f20 Guido Trotter

811 6d7e1f20 Guido Trotter
  """
812 6d7e1f20 Guido Trotter
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
813 6d7e1f20 Guido Trotter
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
814 6d7e1f20 Guido Trotter
  # the new node will increase mc_max by one, so:
815 6d7e1f20 Guido Trotter
  mc_should = min(mc_should + 1, cp_size)
816 6d7e1f20 Guido Trotter
  return mc_now < mc_should
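# A small worked example of the decision above (numbers are hypothetical):
# with candidate_pool_size = 10, mc_now = 3 and mc_should = 3, adding this
# node gives mc_should = min(3 + 1, 10) = 4, and since 3 < 4 the node will
# promote itself; with candidate_pool_size = 3 the min() caps mc_should at 3
# and no promotion happens.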
817 6d7e1f20 Guido Trotter
818 6d7e1f20 Guido Trotter
819 b165e77e Guido Trotter
def _CheckNicsBridgesExist(lu, target_nics, target_node,
820 b165e77e Guido Trotter
                               profile=constants.PP_DEFAULT):
821 b165e77e Guido Trotter
  """Check that the brigdes needed by a list of nics exist.
822 b165e77e Guido Trotter

823 b165e77e Guido Trotter
  """
824 b165e77e Guido Trotter
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
825 b165e77e Guido Trotter
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
826 b165e77e Guido Trotter
                for nic in target_nics]
827 b165e77e Guido Trotter
  brlist = [params[constants.NIC_LINK] for params in paramslist
828 b165e77e Guido Trotter
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
829 b165e77e Guido Trotter
  if brlist:
830 b165e77e Guido Trotter
    result = lu.rpc.call_bridges_exist(target_node, brlist)
831 4c4e4e1e Iustin Pop
    result.Raise("Error checking bridges on destination node '%s'" %
832 045dd6d9 Iustin Pop
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
833 b165e77e Guido Trotter
834 b165e77e Guido Trotter
835 b165e77e Guido Trotter
def _CheckInstanceBridgesExist(lu, instance, node=None):
836 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
837 bf6929a2 Alexander Schreiber

838 bf6929a2 Alexander Schreiber
  """
839 b165e77e Guido Trotter
  if node is None:
840 29921401 Iustin Pop
    node = instance.primary_node
841 b165e77e Guido Trotter
  _CheckNicsBridgesExist(lu, instance.nics, node)
842 bf6929a2 Alexander Schreiber
843 bf6929a2 Alexander Schreiber
844 c6f1af07 Iustin Pop
def _CheckOSVariant(os_obj, name):
845 f2c05717 Guido Trotter
  """Check whether an OS name conforms to the os variants specification.
846 f2c05717 Guido Trotter

847 c6f1af07 Iustin Pop
  @type os_obj: L{objects.OS}
848 c6f1af07 Iustin Pop
  @param os_obj: OS object to check
849 f2c05717 Guido Trotter
  @type name: string
850 f2c05717 Guido Trotter
  @param name: OS name passed by the user, to check for validity
851 f2c05717 Guido Trotter

852 f2c05717 Guido Trotter
  """
853 c6f1af07 Iustin Pop
  if not os_obj.supported_variants:
854 f2c05717 Guido Trotter
    return
855 f2c05717 Guido Trotter
  try:
856 f2c05717 Guido Trotter
    variant = name.split("+", 1)[1]
857 f2c05717 Guido Trotter
  except IndexError:
858 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("OS name must include a variant",
859 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
860 f2c05717 Guido Trotter
861 c6f1af07 Iustin Pop
  if variant not in os_obj.supported_variants:
862 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
863 f2c05717 Guido Trotter
864 f2c05717 Guido Trotter
865 5ba9701d Michael Hanselmann
def _GetNodeInstancesInner(cfg, fn):
866 5ba9701d Michael Hanselmann
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
867 5ba9701d Michael Hanselmann
868 5ba9701d Michael Hanselmann
869 e9721add Michael Hanselmann
def _GetNodeInstances(cfg, node_name):
870 e9721add Michael Hanselmann
  """Returns a list of all primary and secondary instances on a node.
871 e9721add Michael Hanselmann

872 e9721add Michael Hanselmann
  """
873 e9721add Michael Hanselmann
874 e9721add Michael Hanselmann
  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)
875 e9721add Michael Hanselmann
876 e9721add Michael Hanselmann
877 80cb875c Michael Hanselmann
def _GetNodePrimaryInstances(cfg, node_name):
878 80cb875c Michael Hanselmann
  """Returns primary instances on a node.
879 80cb875c Michael Hanselmann

880 80cb875c Michael Hanselmann
  """
881 5ba9701d Michael Hanselmann
  return _GetNodeInstancesInner(cfg,
882 5ba9701d Michael Hanselmann
                                lambda inst: node_name == inst.primary_node)
883 80cb875c Michael Hanselmann
884 80cb875c Michael Hanselmann
885 692738fc Michael Hanselmann
def _GetNodeSecondaryInstances(cfg, node_name):
886 692738fc Michael Hanselmann
  """Returns secondary instances on a node.
887 692738fc Michael Hanselmann

888 692738fc Michael Hanselmann
  """
889 5ba9701d Michael Hanselmann
  return _GetNodeInstancesInner(cfg,
890 5ba9701d Michael Hanselmann
                                lambda inst: node_name in inst.secondary_nodes)
891 692738fc Michael Hanselmann
892 692738fc Michael Hanselmann
893 efb8da02 Michael Hanselmann
def _GetStorageTypeArgs(cfg, storage_type):
894 efb8da02 Michael Hanselmann
  """Returns the arguments for a storage type.
895 efb8da02 Michael Hanselmann

896 efb8da02 Michael Hanselmann
  """
897 efb8da02 Michael Hanselmann
  # Special case for file storage
898 efb8da02 Michael Hanselmann
  if storage_type == constants.ST_FILE:
899 a4d138b7 Michael Hanselmann
    # storage.FileStorage wants a list of storage directories
900 a4d138b7 Michael Hanselmann
    return [[cfg.GetFileStorageDir()]]
901 efb8da02 Michael Hanselmann
902 efb8da02 Michael Hanselmann
  return []
903 efb8da02 Michael Hanselmann
904 efb8da02 Michael Hanselmann
905 2d9005d8 Michael Hanselmann
def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
906 2d9005d8 Michael Hanselmann
  faulty = []
907 2d9005d8 Michael Hanselmann
908 2d9005d8 Michael Hanselmann
  for dev in instance.disks:
909 2d9005d8 Michael Hanselmann
    cfg.SetDiskID(dev, node_name)
910 2d9005d8 Michael Hanselmann
911 2d9005d8 Michael Hanselmann
  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
912 2d9005d8 Michael Hanselmann
  result.Raise("Failed to get disk status from node %s" % node_name,
913 045dd6d9 Iustin Pop
               prereq=prereq, ecode=errors.ECODE_ENVIRON)
914 2d9005d8 Michael Hanselmann
915 2d9005d8 Michael Hanselmann
  for idx, bdev_status in enumerate(result.payload):
916 2d9005d8 Michael Hanselmann
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
917 2d9005d8 Michael Hanselmann
      faulty.append(idx)
918 2d9005d8 Michael Hanselmann
919 2d9005d8 Michael Hanselmann
  return faulty
920 2d9005d8 Michael Hanselmann
921 2d9005d8 Michael Hanselmann
922 b98bf262 Michael Hanselmann
def _FormatTimestamp(secs):
923 b98bf262 Michael Hanselmann
  """Formats a Unix timestamp with the local timezone.
924 b98bf262 Michael Hanselmann

925 b98bf262 Michael Hanselmann
  """
926 b98bf262 Michael Hanselmann
  return time.strftime("%F %T %Z", time.gmtime(secs))
927 b98bf262 Michael Hanselmann
928 b98bf262 Michael Hanselmann
929 b5f5fae9 Luca Bigliardi
class LUPostInitCluster(LogicalUnit):
930 b5f5fae9 Luca Bigliardi
  """Logical unit for running hooks after cluster initialization.
931 b5f5fae9 Luca Bigliardi

932 b5f5fae9 Luca Bigliardi
  """
933 b5f5fae9 Luca Bigliardi
  HPATH = "cluster-init"
934 b5f5fae9 Luca Bigliardi
  HTYPE = constants.HTYPE_CLUSTER
935 b5f5fae9 Luca Bigliardi
  _OP_REQP = []
936 b5f5fae9 Luca Bigliardi
937 b5f5fae9 Luca Bigliardi
  def BuildHooksEnv(self):
938 b5f5fae9 Luca Bigliardi
    """Build hooks env.
939 b5f5fae9 Luca Bigliardi

940 b5f5fae9 Luca Bigliardi
    """
941 b5f5fae9 Luca Bigliardi
    env = {"OP_TARGET": self.cfg.GetClusterName()}
942 b5f5fae9 Luca Bigliardi
    mn = self.cfg.GetMasterNode()
943 b5f5fae9 Luca Bigliardi
    return env, [], [mn]
944 b5f5fae9 Luca Bigliardi
945 b5f5fae9 Luca Bigliardi
  def CheckPrereq(self):
946 b5f5fae9 Luca Bigliardi
    """No prerequisites to check.
947 b5f5fae9 Luca Bigliardi

948 b5f5fae9 Luca Bigliardi
    """
949 b5f5fae9 Luca Bigliardi
    return True
950 b5f5fae9 Luca Bigliardi
951 b5f5fae9 Luca Bigliardi
  def Exec(self, feedback_fn):
952 b5f5fae9 Luca Bigliardi
    """Nothing to do.
953 b5f5fae9 Luca Bigliardi

954 b5f5fae9 Luca Bigliardi
    """
955 b5f5fae9 Luca Bigliardi
    return True
956 b5f5fae9 Luca Bigliardi
957 b5f5fae9 Luca Bigliardi
958 b2c750a4 Luca Bigliardi
class LUDestroyCluster(LogicalUnit):
959 a8083063 Iustin Pop
  """Logical unit for destroying the cluster.
960 a8083063 Iustin Pop

961 a8083063 Iustin Pop
  """
962 b2c750a4 Luca Bigliardi
  HPATH = "cluster-destroy"
963 b2c750a4 Luca Bigliardi
  HTYPE = constants.HTYPE_CLUSTER
964 a8083063 Iustin Pop
  _OP_REQP = []
965 a8083063 Iustin Pop
966 b2c750a4 Luca Bigliardi
  def BuildHooksEnv(self):
967 b2c750a4 Luca Bigliardi
    """Build hooks env.
968 b2c750a4 Luca Bigliardi

969 b2c750a4 Luca Bigliardi
    """
970 b2c750a4 Luca Bigliardi
    env = {"OP_TARGET": self.cfg.GetClusterName()}
971 b2c750a4 Luca Bigliardi
    return env, [], []
972 b2c750a4 Luca Bigliardi
973 a8083063 Iustin Pop
  def CheckPrereq(self):
974 a8083063 Iustin Pop
    """Check prerequisites.
975 a8083063 Iustin Pop

976 a8083063 Iustin Pop
    This checks whether the cluster is empty.
977 a8083063 Iustin Pop

978 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
979 a8083063 Iustin Pop

980 a8083063 Iustin Pop
    """
981 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
982 a8083063 Iustin Pop
983 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
984 db915bd1 Michael Hanselmann
    if len(nodelist) != 1 or nodelist[0] != master:
985 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d node(s) in"
986 5c983ee5 Iustin Pop
                                 " this cluster." % (len(nodelist) - 1),
987 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
988 db915bd1 Michael Hanselmann
    instancelist = self.cfg.GetInstanceList()
989 db915bd1 Michael Hanselmann
    if instancelist:
990 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d instance(s) in"
991 5c983ee5 Iustin Pop
                                 " this cluster." % len(instancelist),
992 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
993 a8083063 Iustin Pop
994 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
995 a8083063 Iustin Pop
    """Destroys the cluster.
996 a8083063 Iustin Pop

997 a8083063 Iustin Pop
    """
998 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
999 b989b9d9 Ken Wehr
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
1000 3141ad3b Luca Bigliardi
1001 3141ad3b Luca Bigliardi
    # Run post hooks on master node before it's removed
1002 3141ad3b Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
1003 3141ad3b Luca Bigliardi
    try:
1004 3141ad3b Luca Bigliardi
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
1005 3141ad3b Luca Bigliardi
    except:
1006 7260cfbe Iustin Pop
      # pylint: disable-msg=W0702
1007 3141ad3b Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % master)
1008 3141ad3b Luca Bigliardi
1009 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
1010 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
1011 b989b9d9 Ken Wehr
1012 b989b9d9 Ken Wehr
    if modify_ssh_setup:
1013 b989b9d9 Ken Wehr
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1014 b989b9d9 Ken Wehr
      utils.CreateBackup(priv_key)
1015 b989b9d9 Ken Wehr
      utils.CreateBackup(pub_key)
1016 b989b9d9 Ken Wehr
1017 140aa4a8 Iustin Pop
    return master
1018 a8083063 Iustin Pop
1019 a8083063 Iustin Pop
1020 b98bf262 Michael Hanselmann
def _VerifyCertificateInner(filename, expired, not_before, not_after, now,
1021 b98bf262 Michael Hanselmann
                            warn_days=constants.SSL_CERT_EXPIRATION_WARN,
1022 b98bf262 Michael Hanselmann
                            error_days=constants.SSL_CERT_EXPIRATION_ERROR):
1023 b98bf262 Michael Hanselmann
  """Verifies certificate details for LUVerifyCluster.
1024 b98bf262 Michael Hanselmann

1025 b98bf262 Michael Hanselmann
  """
1026 b98bf262 Michael Hanselmann
  if expired:
1027 b98bf262 Michael Hanselmann
    msg = "Certificate %s is expired" % filename
1028 b98bf262 Michael Hanselmann
1029 b98bf262 Michael Hanselmann
    if not_before is not None and not_after is not None:
1030 b98bf262 Michael Hanselmann
      msg += (" (valid from %s to %s)" %
1031 b98bf262 Michael Hanselmann
              (_FormatTimestamp(not_before),
1032 b98bf262 Michael Hanselmann
               _FormatTimestamp(not_after)))
1033 b98bf262 Michael Hanselmann
    elif not_before is not None:
1034 b98bf262 Michael Hanselmann
      msg += " (valid from %s)" % _FormatTimestamp(not_before)
1035 b98bf262 Michael Hanselmann
    elif not_after is not None:
1036 b98bf262 Michael Hanselmann
      msg += " (valid until %s)" % _FormatTimestamp(not_after)
1037 b98bf262 Michael Hanselmann
1038 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_ERROR, msg)
1039 b98bf262 Michael Hanselmann
1040 b98bf262 Michael Hanselmann
  elif not_before is not None and not_before > now:
1041 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_WARNING,
1042 b98bf262 Michael Hanselmann
            "Certificate %s not yet valid (valid from %s)" %
1043 b98bf262 Michael Hanselmann
            (filename, _FormatTimestamp(not_before)))
1044 b98bf262 Michael Hanselmann
1045 b98bf262 Michael Hanselmann
  elif not_after is not None:
1046 b98bf262 Michael Hanselmann
    remaining_days = int((not_after - now) / (24 * 3600))
1047 b98bf262 Michael Hanselmann
1048 b98bf262 Michael Hanselmann
    msg = ("Certificate %s expires in %d days" % (filename, remaining_days))
1049 b98bf262 Michael Hanselmann
1050 b98bf262 Michael Hanselmann
    if remaining_days <= error_days:
1051 b98bf262 Michael Hanselmann
      return (LUVerifyCluster.ETYPE_ERROR, msg)
1052 b98bf262 Michael Hanselmann
1053 b98bf262 Michael Hanselmann
    if remaining_days <= warn_days:
1054 b98bf262 Michael Hanselmann
      return (LUVerifyCluster.ETYPE_WARNING, msg)
1055 b98bf262 Michael Hanselmann
1056 b98bf262 Michael Hanselmann
  return (None, None)
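# Illustrative sketch (not part of the original module), assuming warn_days=30
# and error_days=7 (the real thresholds come from the
# constants.SSL_CERT_EXPIRATION_* defaults used above): for a certificate that
# is not expired, a not_after roughly 3 days away yields an ETYPE_ERROR entry,
# roughly 20 days away an ETYPE_WARNING entry, and anything beyond warn_days
# yields (None, None), e.g.:
#
#   now = time.time()
#   _VerifyCertificateInner("cert.pem", False, None, now + 20 * 86400, now,
#                           warn_days=30, error_days=7)
#   # -> (LUVerifyCluster.ETYPE_WARNING, "Certificate cert.pem expires in ...")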
1057 b98bf262 Michael Hanselmann
1058 b98bf262 Michael Hanselmann
1059 b98bf262 Michael Hanselmann
def _VerifyCertificate(filename):
1060 b98bf262 Michael Hanselmann
  """Verifies a certificate for LUVerifyCluster.
1061 b98bf262 Michael Hanselmann

1062 b98bf262 Michael Hanselmann
  @type filename: string
1063 b98bf262 Michael Hanselmann
  @param filename: Path to PEM file
1064 b98bf262 Michael Hanselmann

1065 b98bf262 Michael Hanselmann
  """
1066 b98bf262 Michael Hanselmann
  try:
1067 b98bf262 Michael Hanselmann
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1068 b98bf262 Michael Hanselmann
                                           utils.ReadFile(filename))
1069 b98bf262 Michael Hanselmann
  except Exception, err: # pylint: disable-msg=W0703
1070 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_ERROR,
1071 b98bf262 Michael Hanselmann
            "Failed to load X509 certificate %s: %s" % (filename, err))
1072 b98bf262 Michael Hanselmann
1073 b98bf262 Michael Hanselmann
  # Depending on the pyOpenSSL version, this can just return (None, None)
1074 b98bf262 Michael Hanselmann
  (not_before, not_after) = utils.GetX509CertValidity(cert)
1075 b98bf262 Michael Hanselmann
1076 b98bf262 Michael Hanselmann
  return _VerifyCertificateInner(filename, cert.has_expired(),
1077 b98bf262 Michael Hanselmann
                                 not_before, not_after, time.time())
1078 b98bf262 Michael Hanselmann
1079 b98bf262 Michael Hanselmann
1080 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
1081 a8083063 Iustin Pop
  """Verifies the cluster status.
1082 a8083063 Iustin Pop

1083 a8083063 Iustin Pop
  """
1084 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
1085 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
1086 a0c9776a Iustin Pop
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
1087 d4b9d97f Guido Trotter
  REQ_BGL = False
1088 d4b9d97f Guido Trotter
1089 7c874ee1 Iustin Pop
  TCLUSTER = "cluster"
1090 7c874ee1 Iustin Pop
  TNODE = "node"
1091 7c874ee1 Iustin Pop
  TINSTANCE = "instance"
1092 7c874ee1 Iustin Pop
1093 7c874ee1 Iustin Pop
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1094 b98bf262 Michael Hanselmann
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1095 7c874ee1 Iustin Pop
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1096 7c874ee1 Iustin Pop
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1097 7c874ee1 Iustin Pop
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1098 7c874ee1 Iustin Pop
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1099 7c874ee1 Iustin Pop
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1100 7c874ee1 Iustin Pop
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1101 7c874ee1 Iustin Pop
  ENODEDRBD = (TNODE, "ENODEDRBD")
1102 7c874ee1 Iustin Pop
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1103 7c874ee1 Iustin Pop
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1104 7c874ee1 Iustin Pop
  ENODEHV = (TNODE, "ENODEHV")
1105 7c874ee1 Iustin Pop
  ENODELVM = (TNODE, "ENODELVM")
1106 7c874ee1 Iustin Pop
  ENODEN1 = (TNODE, "ENODEN1")
1107 7c874ee1 Iustin Pop
  ENODENET = (TNODE, "ENODENET")
1108 7c874ee1 Iustin Pop
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1109 7c874ee1 Iustin Pop
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1110 7c874ee1 Iustin Pop
  ENODERPC = (TNODE, "ENODERPC")
1111 7c874ee1 Iustin Pop
  ENODESSH = (TNODE, "ENODESSH")
1112 7c874ee1 Iustin Pop
  ENODEVERSION = (TNODE, "ENODEVERSION")
1113 7c0aa8e9 Iustin Pop
  ENODESETUP = (TNODE, "ENODESETUP")
1114 313b2dd4 Michael Hanselmann
  ENODETIME = (TNODE, "ENODETIME")
1115 7c874ee1 Iustin Pop
1116 a0c9776a Iustin Pop
  ETYPE_FIELD = "code"
1117 a0c9776a Iustin Pop
  ETYPE_ERROR = "ERROR"
1118 a0c9776a Iustin Pop
  ETYPE_WARNING = "WARNING"
1119 a0c9776a Iustin Pop
1120 02c521e4 Iustin Pop
  class NodeImage(object):
1121 02c521e4 Iustin Pop
    """A class representing the logical and physical status of a node.
1122 02c521e4 Iustin Pop

1123 02c521e4 Iustin Pop
    @ivar volumes: a structure as returned from
1124 3a488770 Iustin Pop
        L{ganeti.backend.GetVolumeList} (runtime)
1125 02c521e4 Iustin Pop
    @ivar instances: a list of running instances (runtime)
1126 02c521e4 Iustin Pop
    @ivar pinst: list of configured primary instances (config)
1127 02c521e4 Iustin Pop
    @ivar sinst: list of configured secondary instances (config)
1128 02c521e4 Iustin Pop
    @ivar sbp: dictionary of {primary-node: list of instances} for all
1129 02c521e4 Iustin Pop
        instances for which this node is secondary (config)
1130 02c521e4 Iustin Pop
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1131 02c521e4 Iustin Pop
    @ivar dfree: free disk, as reported by the node (runtime)
1132 02c521e4 Iustin Pop
    @ivar offline: the offline status (config)
1133 02c521e4 Iustin Pop
    @type rpc_fail: boolean
1134 02c521e4 Iustin Pop
    @ivar rpc_fail: whether the RPC verify call failed (overall failure,
1135 02c521e4 Iustin Pop
        not whether the individual keys were correct) (runtime)
1136 02c521e4 Iustin Pop
    @type lvm_fail: boolean
1137 02c521e4 Iustin Pop
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1138 02c521e4 Iustin Pop
    @type hyp_fail: boolean
1139 02c521e4 Iustin Pop
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1140 02c521e4 Iustin Pop
    @type ghost: boolean
1141 02c521e4 Iustin Pop
    @ivar ghost: whether this is a known node or not (config)
1142 02c521e4 Iustin Pop

1143 02c521e4 Iustin Pop
    """
1144 02c521e4 Iustin Pop
    def __init__(self, offline=False):
1145 02c521e4 Iustin Pop
      self.volumes = {}
1146 02c521e4 Iustin Pop
      self.instances = []
1147 02c521e4 Iustin Pop
      self.pinst = []
1148 02c521e4 Iustin Pop
      self.sinst = []
1149 02c521e4 Iustin Pop
      self.sbp = {}
1150 02c521e4 Iustin Pop
      self.mfree = 0
1151 02c521e4 Iustin Pop
      self.dfree = 0
1152 02c521e4 Iustin Pop
      self.offline = offline
1153 02c521e4 Iustin Pop
      self.rpc_fail = False
1154 02c521e4 Iustin Pop
      self.lvm_fail = False
1155 02c521e4 Iustin Pop
      self.hyp_fail = False
1156 02c521e4 Iustin Pop
      self.ghost = False
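  # Illustrative sketch (not part of the original module): Exec() below
  # builds one NodeImage per configured node and adds "ghost" images for
  # nodes that only appear in instance definitions, roughly like:
  #
  #   nimg = self.NodeImage(offline=node.offline)   # known node
  #   gnode = self.NodeImage()                      # unknown node
  #   gnode.ghost = True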
1157 02c521e4 Iustin Pop
1158 d4b9d97f Guido Trotter
  def ExpandNames(self):
1159 d4b9d97f Guido Trotter
    self.needed_locks = {
1160 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1161 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1162 d4b9d97f Guido Trotter
    }
1163 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1164 a8083063 Iustin Pop
1165 7c874ee1 Iustin Pop
  def _Error(self, ecode, item, msg, *args, **kwargs):
1166 7c874ee1 Iustin Pop
    """Format an error message.
1167 7c874ee1 Iustin Pop

1168 7c874ee1 Iustin Pop
    Based on the opcode's error_codes parameter, either format a
1169 7c874ee1 Iustin Pop
    parseable error code, or a simpler error string.
1170 7c874ee1 Iustin Pop

1171 7c874ee1 Iustin Pop
    This must be called only from Exec and functions called from Exec.
1172 7c874ee1 Iustin Pop

1173 7c874ee1 Iustin Pop
    """
1174 a0c9776a Iustin Pop
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1175 7c874ee1 Iustin Pop
    itype, etxt = ecode
1176 7c874ee1 Iustin Pop
    # first complete the msg
1177 7c874ee1 Iustin Pop
    if args:
1178 7c874ee1 Iustin Pop
      msg = msg % args
1179 7c874ee1 Iustin Pop
    # then format the whole message
1180 7c874ee1 Iustin Pop
    if self.op.error_codes:
1181 7c874ee1 Iustin Pop
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1182 7c874ee1 Iustin Pop
    else:
1183 7c874ee1 Iustin Pop
      if item:
1184 7c874ee1 Iustin Pop
        item = " " + item
1185 7c874ee1 Iustin Pop
      else:
1186 7c874ee1 Iustin Pop
        item = ""
1187 7c874ee1 Iustin Pop
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1188 7c874ee1 Iustin Pop
    # and finally report it via the feedback_fn
1189 7c874ee1 Iustin Pop
    self._feedback_fn("  - %s" % msg)
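  # Illustrative sketch (not part of the original module): with
  # self.op.error_codes set, a call such as
  #   self._Error(self.ENODELVM, "node1", "unable to check volume groups")
  # is reported (prefixed with "  - ") as
  #   ERROR:ENODELVM:node:node1:unable to check volume groups
  # while without error_codes the same call is reported as
  #   ERROR: node node1: unable to check volume groups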
1190 7c874ee1 Iustin Pop
1191 a0c9776a Iustin Pop
  def _ErrorIf(self, cond, *args, **kwargs):
1192 a0c9776a Iustin Pop
    """Log an error message if the passed condition is True.
1193 a0c9776a Iustin Pop

1194 a0c9776a Iustin Pop
    """
1195 a0c9776a Iustin Pop
    cond = bool(cond) or self.op.debug_simulate_errors
1196 a0c9776a Iustin Pop
    if cond:
1197 a0c9776a Iustin Pop
      self._Error(*args, **kwargs)
1198 a0c9776a Iustin Pop
    # do not mark the operation as failed for WARN cases only
1199 a0c9776a Iustin Pop
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1200 a0c9776a Iustin Pop
      self.bad = self.bad or cond
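  # Illustrative usage sketch (not part of the original module):
  #   self._ErrorIf(vg_name is None, self.ENODELVM, node,
  #                 "no volume group configured", code=self.ETYPE_WARNING)
  # would emit a WARNING line without setting self.bad, since only
  # ETYPE_ERROR entries mark the verification as failed.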
1201 a0c9776a Iustin Pop
1202 02c521e4 Iustin Pop
  def _VerifyNode(self, ninfo, nresult):
1203 a8083063 Iustin Pop
    """Run multiple tests against a node.
1204 a8083063 Iustin Pop

1205 112f18a5 Iustin Pop
    Test list:
1206 e4376078 Iustin Pop

1207 a8083063 Iustin Pop
      - compares ganeti version
1208 5bbd3f7f Michael Hanselmann
      - checks vg existence and size > 20G
1209 a8083063 Iustin Pop
      - checks config file checksum
1210 a8083063 Iustin Pop
      - checks ssh to other nodes
1211 a8083063 Iustin Pop

1212 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1213 02c521e4 Iustin Pop
    @param ninfo: the node to check
1214 02c521e4 Iustin Pop
    @param nresult: the results from the node
1215 02c521e4 Iustin Pop
    @rtype: boolean
1216 02c521e4 Iustin Pop
    @return: whether overall this call was successful (and we can expect
1217 02c521e4 Iustin Pop
         reasonable values in the response)
1218 098c0958 Michael Hanselmann

1219 a8083063 Iustin Pop
    """
1220 02c521e4 Iustin Pop
    node = ninfo.name
1221 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1222 25361b9a Iustin Pop
1223 02c521e4 Iustin Pop
    # main result, nresult should be a non-empty dict
1224 02c521e4 Iustin Pop
    test = not nresult or not isinstance(nresult, dict)
1225 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1226 7c874ee1 Iustin Pop
                  "unable to verify node: no data returned")
1227 a0c9776a Iustin Pop
    if test:
1228 02c521e4 Iustin Pop
      return False
1229 25361b9a Iustin Pop
1230 a8083063 Iustin Pop
    # compares ganeti version
1231 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
1232 02c521e4 Iustin Pop
    remote_version = nresult.get("version", None)
1233 a0c9776a Iustin Pop
    test = not (remote_version and
1234 a0c9776a Iustin Pop
                isinstance(remote_version, (list, tuple)) and
1235 a0c9776a Iustin Pop
                len(remote_version) == 2)
1236 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1237 a0c9776a Iustin Pop
             "connection to node returned invalid data")
1238 a0c9776a Iustin Pop
    if test:
1239 02c521e4 Iustin Pop
      return False
1240 a0c9776a Iustin Pop
1241 a0c9776a Iustin Pop
    test = local_version != remote_version[0]
1242 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEVERSION, node,
1243 a0c9776a Iustin Pop
             "incompatible protocol versions: master %s,"
1244 a0c9776a Iustin Pop
             " node %s", local_version, remote_version[0])
1245 a0c9776a Iustin Pop
    if test:
1246 02c521e4 Iustin Pop
      return False
1247 a8083063 Iustin Pop
1248 e9ce0a64 Iustin Pop
    # node seems compatible, we can actually try to look into its results
1249 a8083063 Iustin Pop
1250 e9ce0a64 Iustin Pop
    # full package version
1251 a0c9776a Iustin Pop
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1252 a0c9776a Iustin Pop
                  self.ENODEVERSION, node,
1253 7c874ee1 Iustin Pop
                  "software version mismatch: master %s, node %s",
1254 7c874ee1 Iustin Pop
                  constants.RELEASE_VERSION, remote_version[1],
1255 a0c9776a Iustin Pop
                  code=self.ETYPE_WARNING)
1256 e9ce0a64 Iustin Pop
1257 02c521e4 Iustin Pop
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1258 02c521e4 Iustin Pop
    if isinstance(hyp_result, dict):
1259 02c521e4 Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
1260 02c521e4 Iustin Pop
        test = hv_result is not None
1261 02c521e4 Iustin Pop
        _ErrorIf(test, self.ENODEHV, node,
1262 02c521e4 Iustin Pop
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1263 a8083063 Iustin Pop
1264 a8083063 Iustin Pop
1265 02c521e4 Iustin Pop
    test = nresult.get(constants.NV_NODESETUP,
1266 02c521e4 Iustin Pop
                           ["Missing NODESETUP results"])
1267 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1268 02c521e4 Iustin Pop
             "; ".join(test))
1269 02c521e4 Iustin Pop
1270 02c521e4 Iustin Pop
    return True
1271 02c521e4 Iustin Pop
1272 02c521e4 Iustin Pop
  def _VerifyNodeTime(self, ninfo, nresult,
1273 02c521e4 Iustin Pop
                      nvinfo_starttime, nvinfo_endtime):
1274 02c521e4 Iustin Pop
    """Check the node time.
1275 02c521e4 Iustin Pop

1276 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1277 02c521e4 Iustin Pop
    @param ninfo: the node to check
1278 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1279 02c521e4 Iustin Pop
    @param nvinfo_starttime: the start time of the RPC call
1280 02c521e4 Iustin Pop
    @param nvinfo_endtime: the end time of the RPC call
1281 02c521e4 Iustin Pop

1282 02c521e4 Iustin Pop
    """
1283 02c521e4 Iustin Pop
    node = ninfo.name
1284 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1285 02c521e4 Iustin Pop
1286 02c521e4 Iustin Pop
    ntime = nresult.get(constants.NV_TIME, None)
1287 02c521e4 Iustin Pop
    try:
1288 02c521e4 Iustin Pop
      ntime_merged = utils.MergeTime(ntime)
1289 02c521e4 Iustin Pop
    except (ValueError, TypeError):
1290 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1291 02c521e4 Iustin Pop
      return
1292 02c521e4 Iustin Pop
1293 02c521e4 Iustin Pop
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1294 02c521e4 Iustin Pop
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1295 02c521e4 Iustin Pop
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1296 02c521e4 Iustin Pop
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1297 02c521e4 Iustin Pop
    else:
1298 02c521e4 Iustin Pop
      ntime_diff = None
1299 02c521e4 Iustin Pop
1300 02c521e4 Iustin Pop
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1301 02c521e4 Iustin Pop
             "Node time diverges by at least %s from master node time",
1302 02c521e4 Iustin Pop
             ntime_diff)
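  # Illustrative sketch (not part of the original module): with a verify RPC
  # issued at t=100.0 and answered at t=101.0, and assuming
  # constants.NODE_MAX_CLOCK_SKEW were 150 seconds, a node clock of t=260.0
  # fails the check (260.0 > 101.0 + 150) and is reported as diverging by
  # "159.0s" (abs(260.0 - 101.0)); any node clock inside the window
  # [100.0 - 150, 101.0 + 150] passes.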
1303 02c521e4 Iustin Pop
1304 02c521e4 Iustin Pop
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1305 02c521e4 Iustin Pop
    """Check the node time.
1306 02c521e4 Iustin Pop

1307 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1308 02c521e4 Iustin Pop
    @param ninfo: the node to check
1309 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1310 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1311 02c521e4 Iustin Pop

1312 02c521e4 Iustin Pop
    """
1313 02c521e4 Iustin Pop
    if vg_name is None:
1314 02c521e4 Iustin Pop
      return
1315 02c521e4 Iustin Pop
1316 02c521e4 Iustin Pop
    node = ninfo.name
1317 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1318 02c521e4 Iustin Pop
1319 02c521e4 Iustin Pop
    # checks vg existence and size > 20G
1320 02c521e4 Iustin Pop
    vglist = nresult.get(constants.NV_VGLIST, None)
1321 02c521e4 Iustin Pop
    test = not vglist
1322 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1323 02c521e4 Iustin Pop
    if not test:
1324 02c521e4 Iustin Pop
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1325 02c521e4 Iustin Pop
                                            constants.MIN_VG_SIZE)
1326 02c521e4 Iustin Pop
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1327 02c521e4 Iustin Pop
1328 02c521e4 Iustin Pop
    # check pv names
1329 02c521e4 Iustin Pop
    pvlist = nresult.get(constants.NV_PVLIST, None)
1330 02c521e4 Iustin Pop
    test = pvlist is None
1331 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1332 a0c9776a Iustin Pop
    if not test:
1333 02c521e4 Iustin Pop
      # check that ':' is not present in PV names, since it's a
1334 02c521e4 Iustin Pop
      # special character for lvcreate (denotes the range of PEs to
1335 02c521e4 Iustin Pop
      # use on the PV)
1336 02c521e4 Iustin Pop
      for _, pvname, owner_vg in pvlist:
1337 02c521e4 Iustin Pop
        test = ":" in pvname
1338 02c521e4 Iustin Pop
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1339 02c521e4 Iustin Pop
                 " '%s' of VG '%s'", pvname, owner_vg)
1340 02c521e4 Iustin Pop
1341 02c521e4 Iustin Pop
  def _VerifyNodeNetwork(self, ninfo, nresult):
1342 02c521e4 Iustin Pop
    """Check the node time.
1343 02c521e4 Iustin Pop

1344 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1345 02c521e4 Iustin Pop
    @param ninfo: the node to check
1346 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1347 02c521e4 Iustin Pop

1348 02c521e4 Iustin Pop
    """
1349 02c521e4 Iustin Pop
    node = ninfo.name
1350 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1351 02c521e4 Iustin Pop
1352 02c521e4 Iustin Pop
    test = constants.NV_NODELIST not in nresult
1353 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODESSH, node,
1354 a0c9776a Iustin Pop
             "node hasn't returned node ssh connectivity data")
1355 a0c9776a Iustin Pop
    if not test:
1356 02c521e4 Iustin Pop
      if nresult[constants.NV_NODELIST]:
1357 02c521e4 Iustin Pop
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1358 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODESSH, node,
1359 a0c9776a Iustin Pop
                   "ssh communication with node '%s': %s", a_node, a_msg)
1360 25361b9a Iustin Pop
1361 02c521e4 Iustin Pop
    test = constants.NV_NODENETTEST not in nresult
1362 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODENET, node,
1363 a0c9776a Iustin Pop
             "node hasn't returned node tcp connectivity data")
1364 a0c9776a Iustin Pop
    if not test:
1365 02c521e4 Iustin Pop
      if nresult[constants.NV_NODENETTEST]:
1366 02c521e4 Iustin Pop
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1367 7c874ee1 Iustin Pop
        for anode in nlist:
1368 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODENET, node,
1369 a0c9776a Iustin Pop
                   "tcp communication with node '%s': %s",
1370 02c521e4 Iustin Pop
                   anode, nresult[constants.NV_NODENETTEST][anode])
1371 a8083063 Iustin Pop
1372 02c521e4 Iustin Pop
  def _VerifyInstance(self, instance, instanceconfig, node_image):
1373 a8083063 Iustin Pop
    """Verify an instance.
1374 a8083063 Iustin Pop

1375 a8083063 Iustin Pop
    This function checks to see if the required block devices are
1376 a8083063 Iustin Pop
    available on the instance's node.
1377 a8083063 Iustin Pop

1378 a8083063 Iustin Pop
    """
1379 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1380 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
1381 a8083063 Iustin Pop
1382 a8083063 Iustin Pop
    node_vol_should = {}
1383 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
1384 a8083063 Iustin Pop
1385 a8083063 Iustin Pop
    for node in node_vol_should:
1386 02c521e4 Iustin Pop
      n_img = node_image[node]
1387 02c521e4 Iustin Pop
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1388 02c521e4 Iustin Pop
        # ignore missing volumes on offline or broken nodes
1389 0a66c968 Iustin Pop
        continue
1390 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
1391 02c521e4 Iustin Pop
        test = volume not in n_img.volumes
1392 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1393 a0c9776a Iustin Pop
                 "volume %s missing on node %s", volume, node)
1394 a8083063 Iustin Pop
1395 0d68c45d Iustin Pop
    if instanceconfig.admin_up:
1396 02c521e4 Iustin Pop
      pri_img = node_image[node_current]
1397 02c521e4 Iustin Pop
      test = instance not in pri_img.instances and not pri_img.offline
1398 a0c9776a Iustin Pop
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1399 a0c9776a Iustin Pop
               "instance not running on its primary node %s",
1400 a0c9776a Iustin Pop
               node_current)
1401 a8083063 Iustin Pop
1402 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1403 a8083063 Iustin Pop
      if node != node_current:
1404 02c521e4 Iustin Pop
        test = instance in n_img.instances
1405 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1406 a0c9776a Iustin Pop
                 "instance should not run on node %s", node)
1407 a8083063 Iustin Pop
1408 02c521e4 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_image):
1409 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
1410 a8083063 Iustin Pop

1411 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
1412 a8083063 Iustin Pop
    reported as unknown.
1413 a8083063 Iustin Pop

1414 a8083063 Iustin Pop
    """
1415 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1416 02c521e4 Iustin Pop
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1417 02c521e4 Iustin Pop
        # skip non-healthy nodes
1418 02c521e4 Iustin Pop
        continue
1419 02c521e4 Iustin Pop
      for volume in n_img.volumes:
1420 a0c9776a Iustin Pop
        test = (node not in node_vol_should or
1421 a0c9776a Iustin Pop
                volume not in node_vol_should[node])
1422 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1423 7c874ee1 Iustin Pop
                      "volume %s is unknown", volume)
1424 a8083063 Iustin Pop
1425 02c521e4 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_image):
1426 a8083063 Iustin Pop
    """Verify the list of running instances.
1427 a8083063 Iustin Pop

1428 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
1429 a8083063 Iustin Pop

1430 a8083063 Iustin Pop
    """
1431 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1432 02c521e4 Iustin Pop
      for o_inst in n_img.instances:
1433 a0c9776a Iustin Pop
        test = o_inst not in instancelist
1434 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1435 7c874ee1 Iustin Pop
                      "instance %s on node %s should not exist", o_inst, node)
1436 a8083063 Iustin Pop
1437 02c521e4 Iustin Pop
  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1438 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
1439 2b3b6ddd Guido Trotter

1440 02c521e4 Iustin Pop
    Check that if one single node dies we can still start all the
1441 02c521e4 Iustin Pop
    instances it was primary for.
1442 2b3b6ddd Guido Trotter

1443 2b3b6ddd Guido Trotter
    """
1444 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1445 02c521e4 Iustin Pop
      # This code checks that every node which is now listed as a
1446 02c521e4 Iustin Pop
      # secondary has enough memory to host all instances it would have
1447 02c521e4 Iustin Pop
      # to take over, should any single one of its peer (primary) nodes fail.
1448 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
1449 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
1450 02c521e4 Iustin Pop
      # WARNING: we currently take into account down instances as well
1451 02c521e4 Iustin Pop
      # as up ones, considering that even if they're down someone
1452 02c521e4 Iustin Pop
      # might want to start them even in the event of a node failure.
1453 02c521e4 Iustin Pop
      for prinode, instances in n_img.sbp.items():
1454 2b3b6ddd Guido Trotter
        needed_mem = 0
1455 2b3b6ddd Guido Trotter
        for instance in instances:
1456 338e51e8 Iustin Pop
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1457 c0f2b229 Iustin Pop
          if bep[constants.BE_AUTO_BALANCE]:
1458 3924700f Iustin Pop
            needed_mem += bep[constants.BE_MEMORY]
1459 02c521e4 Iustin Pop
        test = n_img.mfree < needed_mem
1460 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEN1, node,
1461 7c874ee1 Iustin Pop
                      "not enough memory on to accommodate"
1462 7c874ee1 Iustin Pop
                      " failovers should peer node %s fail", prinode)
1463 2b3b6ddd Guido Trotter
1464 02c521e4 Iustin Pop
  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
1465 02c521e4 Iustin Pop
                       master_files):
1466 02c521e4 Iustin Pop
    """Verifies and computes the node required file checksums.
1467 02c521e4 Iustin Pop

1468 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1469 02c521e4 Iustin Pop
    @param ninfo: the node to check
1470 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1471 02c521e4 Iustin Pop
    @param file_list: required list of files
1472 02c521e4 Iustin Pop
    @param local_cksum: dictionary of local files and their checksums
1473 02c521e4 Iustin Pop
    @param master_files: list of files that only masters should have
1474 02c521e4 Iustin Pop

1475 02c521e4 Iustin Pop
    """
1476 02c521e4 Iustin Pop
    node = ninfo.name
1477 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1478 02c521e4 Iustin Pop
1479 02c521e4 Iustin Pop
    remote_cksum = nresult.get(constants.NV_FILELIST, None)
1480 02c521e4 Iustin Pop
    test = not isinstance(remote_cksum, dict)
1481 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEFILECHECK, node,
1482 02c521e4 Iustin Pop
             "node hasn't returned file checksum data")
1483 02c521e4 Iustin Pop
    if test:
1484 02c521e4 Iustin Pop
      return
1485 02c521e4 Iustin Pop
1486 02c521e4 Iustin Pop
    for file_name in file_list:
1487 02c521e4 Iustin Pop
      node_is_mc = ninfo.master_candidate
1488 02c521e4 Iustin Pop
      must_have = (file_name not in master_files) or node_is_mc
1489 02c521e4 Iustin Pop
      # missing
1490 02c521e4 Iustin Pop
      test1 = file_name not in remote_cksum
1491 02c521e4 Iustin Pop
      # invalid checksum
1492 02c521e4 Iustin Pop
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1493 02c521e4 Iustin Pop
      # existing and good
1494 02c521e4 Iustin Pop
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1495 02c521e4 Iustin Pop
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1496 02c521e4 Iustin Pop
               "file '%s' missing", file_name)
1497 02c521e4 Iustin Pop
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1498 02c521e4 Iustin Pop
               "file '%s' has wrong checksum", file_name)
1499 02c521e4 Iustin Pop
      # not candidate and this is not a must-have file
1500 02c521e4 Iustin Pop
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1501 02c521e4 Iustin Pop
               "file '%s' should not exist on non master"
1502 02c521e4 Iustin Pop
               " candidates (and the file is outdated)", file_name)
1503 02c521e4 Iustin Pop
      # all good, except non-master/non-must have combination
1504 02c521e4 Iustin Pop
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1505 02c521e4 Iustin Pop
               "file '%s' should not exist"
1506 02c521e4 Iustin Pop
               " on non master candidates", file_name)
1507 02c521e4 Iustin Pop
1508 02c521e4 Iustin Pop
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_map):
1509 02c521e4 Iustin Pop
    """Verifies and the node DRBD status.
1510 02c521e4 Iustin Pop

1511 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1512 02c521e4 Iustin Pop
    @param ninfo: the node to check
1513 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1514 02c521e4 Iustin Pop
    @param instanceinfo: the dict of instances
1515 02c521e4 Iustin Pop
    @param drbd_map: the DRBD map as returned by
1516 02c521e4 Iustin Pop
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1517 02c521e4 Iustin Pop

1518 02c521e4 Iustin Pop
    """
1519 02c521e4 Iustin Pop
    node = ninfo.name
1520 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1521 02c521e4 Iustin Pop
1522 02c521e4 Iustin Pop
    # compute the DRBD minors
1523 02c521e4 Iustin Pop
    node_drbd = {}
1524 02c521e4 Iustin Pop
    for minor, instance in drbd_map[node].items():
1525 02c521e4 Iustin Pop
      test = instance not in instanceinfo
1526 02c521e4 Iustin Pop
      _ErrorIf(test, self.ECLUSTERCFG, None,
1527 02c521e4 Iustin Pop
               "ghost instance '%s' in temporary DRBD map", instance)
1528 02c521e4 Iustin Pop
      # ghost instance should not be running, but otherwise we
1529 02c521e4 Iustin Pop
      # don't give double warnings (both ghost instance and
1530 02c521e4 Iustin Pop
      # unallocated minor in use)
1531 02c521e4 Iustin Pop
      if test:
1532 02c521e4 Iustin Pop
        node_drbd[minor] = (instance, False)
1533 02c521e4 Iustin Pop
      else:
1534 02c521e4 Iustin Pop
        instance = instanceinfo[instance]
1535 02c521e4 Iustin Pop
        node_drbd[minor] = (instance.name, instance.admin_up)
1536 02c521e4 Iustin Pop
1537 02c521e4 Iustin Pop
    # and now check them
1538 02c521e4 Iustin Pop
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
1539 02c521e4 Iustin Pop
    test = not isinstance(used_minors, (tuple, list))
1540 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEDRBD, node,
1541 02c521e4 Iustin Pop
             "cannot parse drbd status file: %s", str(used_minors))
1542 02c521e4 Iustin Pop
    if test:
1543 02c521e4 Iustin Pop
      # we cannot check drbd status
1544 02c521e4 Iustin Pop
      return
1545 02c521e4 Iustin Pop
1546 02c521e4 Iustin Pop
    for minor, (iname, must_exist) in node_drbd.items():
1547 02c521e4 Iustin Pop
      test = minor not in used_minors and must_exist
1548 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1549 02c521e4 Iustin Pop
               "drbd minor %d of instance %s is not active", minor, iname)
1550 02c521e4 Iustin Pop
    for minor in used_minors:
1551 02c521e4 Iustin Pop
      test = minor not in node_drbd
1552 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1553 02c521e4 Iustin Pop
               "unallocated drbd minor %d is in use", minor)
1554 02c521e4 Iustin Pop
1555 02c521e4 Iustin Pop
  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1556 02c521e4 Iustin Pop
    """Verifies and updates the node volume data.
1557 02c521e4 Iustin Pop

1558 02c521e4 Iustin Pop
    This function will update a L{NodeImage}'s internal structures
1559 02c521e4 Iustin Pop
    with data from the remote call.
1560 02c521e4 Iustin Pop

1561 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1562 02c521e4 Iustin Pop
    @param ninfo: the node to check
1563 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1564 02c521e4 Iustin Pop
    @param nimg: the node image object
1565 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1566 02c521e4 Iustin Pop

1567 02c521e4 Iustin Pop
    """
1568 02c521e4 Iustin Pop
    node = ninfo.name
1569 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1570 02c521e4 Iustin Pop
1571 02c521e4 Iustin Pop
    nimg.lvm_fail = True
1572 02c521e4 Iustin Pop
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1573 02c521e4 Iustin Pop
    if vg_name is None:
1574 02c521e4 Iustin Pop
      pass
1575 02c521e4 Iustin Pop
    elif isinstance(lvdata, basestring):
1576 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1577 02c521e4 Iustin Pop
               utils.SafeEncode(lvdata))
1578 02c521e4 Iustin Pop
    elif not isinstance(lvdata, dict):
1579 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1580 02c521e4 Iustin Pop
    else:
1581 02c521e4 Iustin Pop
      nimg.volumes = lvdata
1582 02c521e4 Iustin Pop
      nimg.lvm_fail = False
1583 02c521e4 Iustin Pop
1584 02c521e4 Iustin Pop
  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1585 02c521e4 Iustin Pop
    """Verifies and updates the node instance list.
1586 02c521e4 Iustin Pop

1587 02c521e4 Iustin Pop
    If the listing was successful, then updates this node's instance
1588 02c521e4 Iustin Pop
    list. Otherwise, it marks the RPC call as failed for the instance
1589 02c521e4 Iustin Pop
    list key.
1590 02c521e4 Iustin Pop

1591 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1592 02c521e4 Iustin Pop
    @param ninfo: the node to check
1593 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1594 02c521e4 Iustin Pop
    @param nimg: the node image object
1595 02c521e4 Iustin Pop

1596 02c521e4 Iustin Pop
    """
1597 02c521e4 Iustin Pop
    idata = nresult.get(constants.NV_INSTANCELIST, None)
1598 02c521e4 Iustin Pop
    test = not isinstance(idata, list)
1599 02c521e4 Iustin Pop
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1600 02c521e4 Iustin Pop
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
1601 02c521e4 Iustin Pop
    if test:
1602 02c521e4 Iustin Pop
      nimg.hyp_fail = True
1603 02c521e4 Iustin Pop
    else:
1604 02c521e4 Iustin Pop
      nimg.instances = idata
1605 02c521e4 Iustin Pop
1606 02c521e4 Iustin Pop
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1607 02c521e4 Iustin Pop
    """Verifies and computes a node information map
1608 02c521e4 Iustin Pop

1609 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1610 02c521e4 Iustin Pop
    @param ninfo: the node to check
1611 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1612 02c521e4 Iustin Pop
    @param nimg: the node image object
1613 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1614 02c521e4 Iustin Pop

1615 02c521e4 Iustin Pop
    """
1616 02c521e4 Iustin Pop
    node = ninfo.name
1617 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1618 02c521e4 Iustin Pop
1619 02c521e4 Iustin Pop
    # try to read free memory (from the hypervisor)
1620 02c521e4 Iustin Pop
    hv_info = nresult.get(constants.NV_HVINFO, None)
1621 02c521e4 Iustin Pop
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1622 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1623 02c521e4 Iustin Pop
    if not test:
1624 02c521e4 Iustin Pop
      try:
1625 02c521e4 Iustin Pop
        nimg.mfree = int(hv_info["memory_free"])
1626 02c521e4 Iustin Pop
      except (ValueError, TypeError):
1627 02c521e4 Iustin Pop
        _ErrorIf(True, self.ENODERPC, node,
1628 02c521e4 Iustin Pop
                 "node returned invalid nodeinfo, check hypervisor")
1629 02c521e4 Iustin Pop
1630 02c521e4 Iustin Pop
    # FIXME: devise a free space model for file based instances as well
1631 02c521e4 Iustin Pop
    if vg_name is not None:
1632 02c521e4 Iustin Pop
      test = (constants.NV_VGLIST not in nresult or
1633 02c521e4 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST])
1634 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODELVM, node,
1635 02c521e4 Iustin Pop
               "node didn't return data for the volume group '%s'"
1636 02c521e4 Iustin Pop
               " - it is either missing or broken", vg_name)
1637 02c521e4 Iustin Pop
      if not test:
1638 02c521e4 Iustin Pop
        try:
1639 02c521e4 Iustin Pop
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1640 02c521e4 Iustin Pop
        except (ValueError, TypeError):
1641 02c521e4 Iustin Pop
          _ErrorIf(True, self.ENODERPC, node,
1642 02c521e4 Iustin Pop
                   "node returned invalid LVM info, check LVM status")
1643 02c521e4 Iustin Pop
1644 a8083063 Iustin Pop
  def CheckPrereq(self):
1645 a8083063 Iustin Pop
    """Check prerequisites.
1646 a8083063 Iustin Pop

1647 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
1648 e54c4c5e Guido Trotter
    all its members are valid.
1649 a8083063 Iustin Pop

1650 a8083063 Iustin Pop
    """
1651 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
1652 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
1653 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid checks to be skipped specified",
1654 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
1655 a8083063 Iustin Pop
1656 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
1657 d8fff41c Guido Trotter
    """Build hooks env.
1658 d8fff41c Guido Trotter

1659 5bbd3f7f Michael Hanselmann
    Cluster-Verify hooks just ran in the post phase and their failure makes
1660 d8fff41c Guido Trotter
    the output be logged in the verify output and the verification to fail.
1661 d8fff41c Guido Trotter

1662 d8fff41c Guido Trotter
    """
1663 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
1664 35e994e9 Iustin Pop
    env = {
1665 35e994e9 Iustin Pop
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1666 35e994e9 Iustin Pop
      }
1667 35e994e9 Iustin Pop
    for node in self.cfg.GetAllNodesInfo().values():
1668 35e994e9 Iustin Pop
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1669 35e994e9 Iustin Pop
1670 d8fff41c Guido Trotter
    return env, [], all_nodes
1671 d8fff41c Guido Trotter
1672 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1673 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
1674 a8083063 Iustin Pop

1675 a8083063 Iustin Pop
    """
1676 a0c9776a Iustin Pop
    self.bad = False
1677 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1678 7c874ee1 Iustin Pop
    verbose = self.op.verbose
1679 7c874ee1 Iustin Pop
    self._feedback_fn = feedback_fn
1680 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
1681 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
1682 a0c9776a Iustin Pop
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
1683 a8083063 Iustin Pop
1684 b98bf262 Michael Hanselmann
    # Check the cluster certificates
1685 b98bf262 Michael Hanselmann
    for cert_filename in constants.ALL_CERT_FILES:
1686 b98bf262 Michael Hanselmann
      (errcode, msg) = _VerifyCertificate(cert_filename)
1687 b98bf262 Michael Hanselmann
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
1688 b98bf262 Michael Hanselmann
1689 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
1690 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
1691 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
1692 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
1693 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
1694 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
1695 6d2e83d5 Iustin Pop
                        for iname in instancelist)
1696 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
1697 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
1698 02c521e4 Iustin Pop
    n_offline = 0 # Count of offline nodes
1699 02c521e4 Iustin Pop
    n_drained = 0 # Count of nodes being drained
1700 02c521e4 Iustin Pop
    node_vol_should = {}
1701 a8083063 Iustin Pop
1702 a8083063 Iustin Pop
    # FIXME: verify OS list
1703 a8083063 Iustin Pop
    # do local checksums
1704 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1705 112f18a5 Iustin Pop
1706 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1707 d3100055 Michael Hanselmann
    file_names.extend(constants.ALL_CERT_FILES)
1708 112f18a5 Iustin Pop
    file_names.extend(master_files)
1709 112f18a5 Iustin Pop
1710 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1711 a8083063 Iustin Pop
1712 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1713 a8083063 Iustin Pop
    node_verify_param = {
1714 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1715 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1716 82e37788 Iustin Pop
                              if not node.offline],
1717 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1718 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1719 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1720 82e37788 Iustin Pop
                                 if not node.offline],
1721 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1722 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1723 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1724 7c0aa8e9 Iustin Pop
      constants.NV_NODESETUP: None,
1725 313b2dd4 Michael Hanselmann
      constants.NV_TIME: None,
1726 a8083063 Iustin Pop
      }
1727 313b2dd4 Michael Hanselmann
1728 cc9e1230 Guido Trotter
    if vg_name is not None:
1729 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1730 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1731 d091393e Iustin Pop
      node_verify_param[constants.NV_PVLIST] = [vg_name]
1732 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1733 313b2dd4 Michael Hanselmann
1734 02c521e4 Iustin Pop
    # Build our expected cluster state
1735 02c521e4 Iustin Pop
    node_image = dict((node.name, self.NodeImage(offline=node.offline))
1736 02c521e4 Iustin Pop
                      for node in nodeinfo)
1737 02c521e4 Iustin Pop
1738 02c521e4 Iustin Pop
    for instance in instancelist:
1739 02c521e4 Iustin Pop
      inst_config = instanceinfo[instance]
1740 02c521e4 Iustin Pop
1741 02c521e4 Iustin Pop
      for nname in inst_config.all_nodes:
1742 02c521e4 Iustin Pop
        if nname not in node_image:
1743 02c521e4 Iustin Pop
          # ghost node
1744 02c521e4 Iustin Pop
          gnode = self.NodeImage()
1745 02c521e4 Iustin Pop
          gnode.ghost = True
1746 02c521e4 Iustin Pop
          node_image[nname] = gnode
1747 02c521e4 Iustin Pop
1748 02c521e4 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1749 02c521e4 Iustin Pop
1750 02c521e4 Iustin Pop
      pnode = inst_config.primary_node
1751 02c521e4 Iustin Pop
      node_image[pnode].pinst.append(instance)
1752 02c521e4 Iustin Pop
1753 02c521e4 Iustin Pop
      for snode in inst_config.secondary_nodes:
1754 02c521e4 Iustin Pop
        nimg = node_image[snode]
1755 02c521e4 Iustin Pop
        nimg.sinst.append(instance)
1756 02c521e4 Iustin Pop
        if pnode not in nimg.sbp:
1757 02c521e4 Iustin Pop
          nimg.sbp[pnode] = []
1758 02c521e4 Iustin Pop
        nimg.sbp[pnode].append(instance)
1759 02c521e4 Iustin Pop
1760 02c521e4 Iustin Pop
    # At this point, we have the in-memory data structures complete,
1761 02c521e4 Iustin Pop
    # except for the runtime information, which we'll gather next
1762 02c521e4 Iustin Pop
1763 313b2dd4 Michael Hanselmann
    # Due to the way our RPC system works, exact response times cannot be
1764 313b2dd4 Michael Hanselmann
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
1765 313b2dd4 Michael Hanselmann
    # time before and after executing the request, we can at least have a time
1766 313b2dd4 Michael Hanselmann
    # window.
1767 313b2dd4 Michael Hanselmann
    nvinfo_starttime = time.time()
1768 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1769 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1770 313b2dd4 Michael Hanselmann
    nvinfo_endtime = time.time()
1771 a8083063 Iustin Pop
1772 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1773 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1774 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1775 6d2e83d5 Iustin Pop
1776 7c874ee1 Iustin Pop
    feedback_fn("* Verifying node status")
1777 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1778 112f18a5 Iustin Pop
      node = node_i.name
1779 02c521e4 Iustin Pop
      nimg = node_image[node]
1780 25361b9a Iustin Pop
1781 0a66c968 Iustin Pop
      if node_i.offline:
1782 7c874ee1 Iustin Pop
        if verbose:
1783 7c874ee1 Iustin Pop
          feedback_fn("* Skipping offline node %s" % (node,))
1784 02c521e4 Iustin Pop
        n_offline += 1
1785 0a66c968 Iustin Pop
        continue
1786 0a66c968 Iustin Pop
1787 112f18a5 Iustin Pop
      if node == master_node:
1788 25361b9a Iustin Pop
        ntype = "master"
1789 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1790 25361b9a Iustin Pop
        ntype = "master candidate"
1791 22f0f71d Iustin Pop
      elif node_i.drained:
1792 22f0f71d Iustin Pop
        ntype = "drained"
1793 02c521e4 Iustin Pop
        n_drained += 1
1794 112f18a5 Iustin Pop
      else:
1795 25361b9a Iustin Pop
        ntype = "regular"
1796 7c874ee1 Iustin Pop
      if verbose:
1797 7c874ee1 Iustin Pop
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1798 25361b9a Iustin Pop
1799 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1800 a0c9776a Iustin Pop
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
1801 6f68a739 Iustin Pop
      if msg:
1802 02c521e4 Iustin Pop
        nimg.rpc_fail = True
1803 25361b9a Iustin Pop
        continue
1804 25361b9a Iustin Pop
1805 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1806 a8083063 Iustin Pop
1807 02c521e4 Iustin Pop
      nimg.call_ok = self._VerifyNode(node_i, nresult)
1808 02c521e4 Iustin Pop
      self._VerifyNodeNetwork(node_i, nresult)
1809 02c521e4 Iustin Pop
      self._VerifyNodeLVM(node_i, nresult, vg_name)
1810 02c521e4 Iustin Pop
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
1811 02c521e4 Iustin Pop
                            master_files)
1812 02c521e4 Iustin Pop
      self._VerifyNodeDrbd(node_i, nresult, instanceinfo, all_drbd_map)
1813 02c521e4 Iustin Pop
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
1814 a8083063 Iustin Pop
1815 02c521e4 Iustin Pop
      self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
1816 02c521e4 Iustin Pop
      self._UpdateNodeInstances(node_i, nresult, nimg)
1817 02c521e4 Iustin Pop
      self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
1818 a8083063 Iustin Pop
1819 7c874ee1 Iustin Pop
    feedback_fn("* Verifying instance status")
1820 a8083063 Iustin Pop
    for instance in instancelist:
1821 7c874ee1 Iustin Pop
      if verbose:
1822 7c874ee1 Iustin Pop
        feedback_fn("* Verifying instance %s" % instance)
1823 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1824 02c521e4 Iustin Pop
      self._VerifyInstance(instance, inst_config, node_image)
1825 832261fd Iustin Pop
      inst_nodes_offline = []
1826 a8083063 Iustin Pop
1827 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1828 02c521e4 Iustin Pop
      pnode_img = node_image[pnode]
1829 02c521e4 Iustin Pop
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
1830 a0c9776a Iustin Pop
               self.ENODERPC, pnode, "instance %s, connection to"
1831 a0c9776a Iustin Pop
               " primary node failed", instance)
1832 93e4c50b Guido Trotter
1833 02c521e4 Iustin Pop
      if pnode_img.offline:
1834 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1835 832261fd Iustin Pop
1836 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1837 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1838 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1839 93e4c50b Guido Trotter
      # supported either.
1840 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1841 02c521e4 Iustin Pop
      if not inst_config.secondary_nodes:
1842 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1843 02c521e4 Iustin Pop
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
1844 02c521e4 Iustin Pop
               instance, "instance has multiple secondary nodes: %s",
1845 02c521e4 Iustin Pop
               utils.CommaJoin(inst_config.secondary_nodes),
1846 02c521e4 Iustin Pop
               code=self.ETYPE_WARNING)
1847 93e4c50b Guido Trotter
1848 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1849 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1850 3924700f Iustin Pop
1851 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1852 02c521e4 Iustin Pop
        s_img = node_image[snode]
1853 02c521e4 Iustin Pop
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
1854 02c521e4 Iustin Pop
                 "instance %s, connection to secondary node failed", instance)
1855 02c521e4 Iustin Pop
1856 02c521e4 Iustin Pop
        if s_img.offline:
1857 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1858 832261fd Iustin Pop
1859 a0c9776a Iustin Pop
      # warn that the instance lives on offline nodes
1860 a0c9776a Iustin Pop
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
1861 a0c9776a Iustin Pop
               "instance lives on offline node(s) %s",
1862 1f864b60 Iustin Pop
               utils.CommaJoin(inst_nodes_offline))
1863 02c521e4 Iustin Pop
      # ... or ghost nodes
1864 02c521e4 Iustin Pop
      for node in inst_config.all_nodes:
1865 02c521e4 Iustin Pop
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
1866 02c521e4 Iustin Pop
                 "instance lives on ghost node %s", node)
1867 93e4c50b Guido Trotter
1868 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1869 02c521e4 Iustin Pop
    self._VerifyOrphanVolumes(node_vol_should, node_image)
1870 a8083063 Iustin Pop
1871 02c521e4 Iustin Pop
    feedback_fn("* Verifying oprhan instances")
1872 02c521e4 Iustin Pop
    self._VerifyOrphanInstances(instancelist, node_image)
1873 a8083063 Iustin Pop
1874 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1875 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1876 02c521e4 Iustin Pop
      self._VerifyNPlusOneMemory(node_image, instanceinfo)
1877 2b3b6ddd Guido Trotter
1878 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1879 2b3b6ddd Guido Trotter
    if i_non_redundant:
1880 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1881 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1882 2b3b6ddd Guido Trotter
1883 3924700f Iustin Pop
    if i_non_a_balanced:
1884 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1885 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1886 3924700f Iustin Pop
1887 0a66c968 Iustin Pop
    if n_offline:
1888 02c521e4 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
1889 0a66c968 Iustin Pop
1890 22f0f71d Iustin Pop
    if n_drained:
1891 02c521e4 Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
1892 22f0f71d Iustin Pop
1893 a0c9776a Iustin Pop
    return not self.bad
1894 a8083063 Iustin Pop
1895 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1896 5bbd3f7f Michael Hanselmann
    """Analyze the post-hooks' result
1897 e4376078 Iustin Pop

1898 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
1899 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
1900 d8fff41c Guido Trotter

1901 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1902 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1903 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
1904 e4376078 Iustin Pop
    @param feedback_fn: function used to send feedback back to the caller
1905 e4376078 Iustin Pop
    @param lu_result: previous Exec result
1906 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
1907 e4376078 Iustin Pop
        and hook results
1908 d8fff41c Guido Trotter

1909 d8fff41c Guido Trotter
    """
1910 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
1911 38206f3c Iustin Pop
    # their results
1912 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
1913 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
1914 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
1915 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
1916 7c874ee1 Iustin Pop
      assert hooks_results, "invalid result from hooks"
1917 7c874ee1 Iustin Pop
1918 7c874ee1 Iustin Pop
      for node_name in hooks_results:
1919 7c874ee1 Iustin Pop
        res = hooks_results[node_name]
1920 7c874ee1 Iustin Pop
        msg = res.fail_msg
1921 a0c9776a Iustin Pop
        test = msg and not res.offline
1922 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
1923 7c874ee1 Iustin Pop
                      "Communication failure in hooks execution: %s", msg)
1924 dd9e9f9c Michael Hanselmann
        if res.offline or msg:
1925 dd9e9f9c Michael Hanselmann
          # No need to investigate payload if node is offline or gave an error.
1926 a0c9776a Iustin Pop
          # override manually lu_result here as _ErrorIf only
1927 a0c9776a Iustin Pop
          # overrides self.bad
1928 7c874ee1 Iustin Pop
          lu_result = 1
1929 7c874ee1 Iustin Pop
          continue
1930 7c874ee1 Iustin Pop
        for script, hkr, output in res.payload:
1931 a0c9776a Iustin Pop
          test = hkr == constants.HKR_FAIL
1932 a0c9776a Iustin Pop
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
1933 7c874ee1 Iustin Pop
                        "Script %s failed, output:", script)
1934 a0c9776a Iustin Pop
          if test:
1935 7c874ee1 Iustin Pop
            output = indent_re.sub('      ', output)
1936 7c874ee1 Iustin Pop
            feedback_fn("%s" % output)
1937 6d7b472a Iustin Pop
            lu_result = 0
1938 d8fff41c Guido Trotter
1939 d8fff41c Guido Trotter
      return lu_result
1940 d8fff41c Guido Trotter
1941 a8083063 Iustin Pop
1942 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1943 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1944 2c95a8d4 Iustin Pop

1945 2c95a8d4 Iustin Pop
  """
1946 2c95a8d4 Iustin Pop
  _OP_REQP = []
1947 d4b9d97f Guido Trotter
  REQ_BGL = False
1948 d4b9d97f Guido Trotter
1949 d4b9d97f Guido Trotter
  def ExpandNames(self):
1950 d4b9d97f Guido Trotter
    self.needed_locks = {
1951 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1952 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1953 d4b9d97f Guido Trotter
    }
1954 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1955 2c95a8d4 Iustin Pop
1956 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1957 2c95a8d4 Iustin Pop
    """Check prerequisites.
1958 2c95a8d4 Iustin Pop

1959 2c95a8d4 Iustin Pop
    This has no prerequisites.
1960 2c95a8d4 Iustin Pop

1961 2c95a8d4 Iustin Pop
    """
1962 2c95a8d4 Iustin Pop
    pass
1963 2c95a8d4 Iustin Pop
1964 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1965 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1966 2c95a8d4 Iustin Pop

1967 29d376ec Iustin Pop
    @rtype: tuple of three items
1968 29d376ec Iustin Pop
    @return: a tuple of (dict of node-to-node_error, list of instances
1969 29d376ec Iustin Pop
        which need activate-disks, dict of instance: (node, volume) for
1970 29d376ec Iustin Pop
        missing volumes)
1971 29d376ec Iustin Pop

1972 2c95a8d4 Iustin Pop
    """
1973 29d376ec Iustin Pop
    result = res_nodes, res_instances, res_missing = {}, [], {}
1974 2c95a8d4 Iustin Pop
1975 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1976 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1977 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1978 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1979 2c95a8d4 Iustin Pop
1980 2c95a8d4 Iustin Pop
    nv_dict = {}
1981 2c95a8d4 Iustin Pop
    for inst in instances:
1982 2c95a8d4 Iustin Pop
      inst_lvs = {}
1983 0d68c45d Iustin Pop
      if (not inst.admin_up or
1984 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1985 2c95a8d4 Iustin Pop
        continue
1986 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1987 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
1988 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1989 2c95a8d4 Iustin Pop
        for vol in vol_list:
1990 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1991 2c95a8d4 Iustin Pop
1992 2c95a8d4 Iustin Pop
    if not nv_dict:
1993 2c95a8d4 Iustin Pop
      return result
1994 2c95a8d4 Iustin Pop
1995 b2a6ccd4 Iustin Pop
    node_lvs = self.rpc.call_lv_list(nodes, vg_name)
1996 2c95a8d4 Iustin Pop
1997 2c95a8d4 Iustin Pop
    for node in nodes:
1998 2c95a8d4 Iustin Pop
      # node_volume
1999 29d376ec Iustin Pop
      node_res = node_lvs[node]
2000 29d376ec Iustin Pop
      if node_res.offline:
2001 ea9ddc07 Iustin Pop
        continue
2002 4c4e4e1e Iustin Pop
      msg = node_res.fail_msg
2003 29d376ec Iustin Pop
      if msg:
2004 29d376ec Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2005 29d376ec Iustin Pop
        res_nodes[node] = msg
2006 2c95a8d4 Iustin Pop
        continue
2007 2c95a8d4 Iustin Pop
2008 29d376ec Iustin Pop
      lvs = node_res.payload
2009 1122eb25 Iustin Pop
      for lv_name, (_, _, lv_online) in lvs.items():
2010 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
2011 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
2012 b63ed789 Iustin Pop
            and inst.name not in res_instances):
2013 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
2014 2c95a8d4 Iustin Pop
2015 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
2016 b63ed789 Iustin Pop
    # data better
2017 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
2018 b63ed789 Iustin Pop
      if inst.name not in res_missing:
2019 b63ed789 Iustin Pop
        res_missing[inst.name] = []
2020 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
2021 b63ed789 Iustin Pop
2022 2c95a8d4 Iustin Pop
    return result
2023 2c95a8d4 Iustin Pop
2024 2c95a8d4 Iustin Pop
2025 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
2026 60975797 Iustin Pop
  """Verifies the cluster disks sizes.
2027 60975797 Iustin Pop

2028 60975797 Iustin Pop
  """
2029 60975797 Iustin Pop
  _OP_REQP = ["instances"]
2030 60975797 Iustin Pop
  REQ_BGL = False
2031 60975797 Iustin Pop
2032 60975797 Iustin Pop
  def ExpandNames(self):
2033 60975797 Iustin Pop
    if not isinstance(self.op.instances, list):
2034 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
2035 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2036 60975797 Iustin Pop
2037 60975797 Iustin Pop
    if self.op.instances:
2038 60975797 Iustin Pop
      self.wanted_names = []
2039 60975797 Iustin Pop
      for name in self.op.instances:
2040 cf26a87a Iustin Pop
        full_name = _ExpandInstanceName(self.cfg, name)
2041 60975797 Iustin Pop
        self.wanted_names.append(full_name)
2042 60975797 Iustin Pop
      self.needed_locks = {
2043 60975797 Iustin Pop
        locking.LEVEL_NODE: [],
2044 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: self.wanted_names,
2045 60975797 Iustin Pop
        }
2046 60975797 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2047 60975797 Iustin Pop
    else:
2048 60975797 Iustin Pop
      self.wanted_names = None
2049 60975797 Iustin Pop
      self.needed_locks = {
2050 60975797 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
2051 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: locking.ALL_SET,
2052 60975797 Iustin Pop
        }
2053 60975797 Iustin Pop
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2054 60975797 Iustin Pop
2055 60975797 Iustin Pop
  def DeclareLocks(self, level):
2056 60975797 Iustin Pop
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
2057 60975797 Iustin Pop
      self._LockInstancesNodes(primary_only=True)
2058 60975797 Iustin Pop
2059 60975797 Iustin Pop
  def CheckPrereq(self):
2060 60975797 Iustin Pop
    """Check prerequisites.
2061 60975797 Iustin Pop

2062 60975797 Iustin Pop
    This only checks the optional instance list against the existing names.
2063 60975797 Iustin Pop

2064 60975797 Iustin Pop
    """
2065 60975797 Iustin Pop
    if self.wanted_names is None:
2066 60975797 Iustin Pop
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2067 60975797 Iustin Pop
2068 60975797 Iustin Pop
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2069 60975797 Iustin Pop
                             in self.wanted_names]
2070 60975797 Iustin Pop
2071 b775c337 Iustin Pop
  def _EnsureChildSizes(self, disk):
2072 b775c337 Iustin Pop
    """Ensure children of the disk have the needed disk size.
2073 b775c337 Iustin Pop

2074 b775c337 Iustin Pop
    This is valid mainly for DRBD8 and fixes an issue where the
2075 b775c337 Iustin Pop
    children have smaller disk size.
2076 b775c337 Iustin Pop

2077 b775c337 Iustin Pop
    @param disk: an L{ganeti.objects.Disk} object
2078 b775c337 Iustin Pop

2079 b775c337 Iustin Pop
    """
2080 b775c337 Iustin Pop
    if disk.dev_type == constants.LD_DRBD8:
2081 b775c337 Iustin Pop
      assert disk.children, "Empty children for DRBD8?"
2082 b775c337 Iustin Pop
      fchild = disk.children[0]
2083 b775c337 Iustin Pop
      mismatch = fchild.size < disk.size
2084 b775c337 Iustin Pop
      if mismatch:
2085 b775c337 Iustin Pop
        self.LogInfo("Child disk has size %d, parent %d, fixing",
2086 b775c337 Iustin Pop
                     fchild.size, disk.size)
2087 b775c337 Iustin Pop
        fchild.size = disk.size
2088 b775c337 Iustin Pop
2089 b775c337 Iustin Pop
      # and we recurse on this child only, not on the metadev
2090 b775c337 Iustin Pop
      return self._EnsureChildSizes(fchild) or mismatch
2091 b775c337 Iustin Pop
    else:
2092 b775c337 Iustin Pop
      return False
2093 b775c337 Iustin Pop
2094 60975797 Iustin Pop
  def Exec(self, feedback_fn):
2095 60975797 Iustin Pop
    """Verify the size of cluster disks.
2096 60975797 Iustin Pop

2097 60975797 Iustin Pop
    """
2098 60975797 Iustin Pop
    # TODO: check child disks too
2099 60975797 Iustin Pop
    # TODO: check differences in size between primary/secondary nodes
2100 60975797 Iustin Pop
    per_node_disks = {}
2101 60975797 Iustin Pop
    for instance in self.wanted_instances:
2102 60975797 Iustin Pop
      pnode = instance.primary_node
2103 60975797 Iustin Pop
      if pnode not in per_node_disks:
2104 60975797 Iustin Pop
        per_node_disks[pnode] = []
2105 60975797 Iustin Pop
      for idx, disk in enumerate(instance.disks):
2106 60975797 Iustin Pop
        per_node_disks[pnode].append((instance, idx, disk))
2107 60975797 Iustin Pop
2108 60975797 Iustin Pop
    changed = []
2109 60975797 Iustin Pop
    for node, dskl in per_node_disks.items():
2110 4d9e6835 Iustin Pop
      newl = [v[2].Copy() for v in dskl]
2111 4d9e6835 Iustin Pop
      for dsk in newl:
2112 4d9e6835 Iustin Pop
        self.cfg.SetDiskID(dsk, node)
2113 4d9e6835 Iustin Pop
      result = self.rpc.call_blockdev_getsizes(node, newl)
2114 3cebe102 Michael Hanselmann
      if result.fail_msg:
2115 60975797 Iustin Pop
        self.LogWarning("Failure in blockdev_getsizes call to node"
2116 60975797 Iustin Pop
                        " %s, ignoring", node)
2117 60975797 Iustin Pop
        continue
2118 60975797 Iustin Pop
      if len(result.payload) != len(dskl):
2119 60975797 Iustin Pop
        self.LogWarning("Invalid result from node %s, ignoring node results",
2120 60975797 Iustin Pop
                        node)
2121 60975797 Iustin Pop
        continue
2122 60975797 Iustin Pop
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
2123 60975797 Iustin Pop
        if size is None:
2124 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return size"
2125 60975797 Iustin Pop
                          " information, ignoring", idx, instance.name)
2126 60975797 Iustin Pop
          continue
2127 60975797 Iustin Pop
        if not isinstance(size, (int, long)):
2128 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return valid"
2129 60975797 Iustin Pop
                          " size information, ignoring", idx, instance.name)
2130 60975797 Iustin Pop
          continue
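        # the RPC presumably reports the size in bytes while the
        # configuration stores MiB, hence the conversion below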
2131 60975797 Iustin Pop
        size = size >> 20
2132 60975797 Iustin Pop
        if size != disk.size:
2133 60975797 Iustin Pop
          self.LogInfo("Disk %d of instance %s has mismatched size,"
2134 60975797 Iustin Pop
                       " correcting: recorded %d, actual %d", idx,
2135 60975797 Iustin Pop
                       instance.name, disk.size, size)
2136 60975797 Iustin Pop
          disk.size = size
2137 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
2138 60975797 Iustin Pop
          changed.append((instance.name, idx, size))
2139 b775c337 Iustin Pop
        if self._EnsureChildSizes(disk):
2140 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
2141 b775c337 Iustin Pop
          changed.append((instance.name, idx, disk.size))
2142 60975797 Iustin Pop
    return changed
2143 60975797 Iustin Pop
2144 60975797 Iustin Pop
2145 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
2146 07bd8a51 Iustin Pop
  """Rename the cluster.
2147 07bd8a51 Iustin Pop

2148 07bd8a51 Iustin Pop
  """
2149 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
2150 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
2151 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
2152 07bd8a51 Iustin Pop
2153 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
2154 07bd8a51 Iustin Pop
    """Build hooks env.
2155 07bd8a51 Iustin Pop

2156 07bd8a51 Iustin Pop
    """
2157 07bd8a51 Iustin Pop
    env = {
2158 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
2159 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
2160 07bd8a51 Iustin Pop
      }
2161 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
2162 47a72f18 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2163 47a72f18 Iustin Pop
    return env, [mn], all_nodes
2164 07bd8a51 Iustin Pop
2165 07bd8a51 Iustin Pop
  def CheckPrereq(self):
2166 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
2167 07bd8a51 Iustin Pop

2168 07bd8a51 Iustin Pop
    """
2169 104f4ca1 Iustin Pop
    hostname = utils.GetHostInfo(self.op.name)
2170 07bd8a51 Iustin Pop
2171 bcf043c9 Iustin Pop
    new_name = hostname.name
2172 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
2173 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
2174 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
2175 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
2176 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
2177 5c983ee5 Iustin Pop
                                 " cluster has changed",
2178 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2179 07bd8a51 Iustin Pop
    if new_ip != old_ip:
2180 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2181 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
2182 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
2183 5c983ee5 Iustin Pop
                                   new_ip, errors.ECODE_NOTUNIQUE)
2184 07bd8a51 Iustin Pop
2185 07bd8a51 Iustin Pop
    self.op.name = new_name
2186 07bd8a51 Iustin Pop
2187 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
2188 07bd8a51 Iustin Pop
    """Rename the cluster.
2189 07bd8a51 Iustin Pop

2190 07bd8a51 Iustin Pop
    """
2191 07bd8a51 Iustin Pop
    clustername = self.op.name
2192 07bd8a51 Iustin Pop
    ip = self.ip
2193 07bd8a51 Iustin Pop
2194 07bd8a51 Iustin Pop
    # shutdown the master IP
2195 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
2196 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
2197 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
2198 07bd8a51 Iustin Pop
2199 07bd8a51 Iustin Pop
    try:
2200 55cf7d83 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
2201 55cf7d83 Iustin Pop
      cluster.cluster_name = clustername
2202 55cf7d83 Iustin Pop
      cluster.master_ip = ip
2203 a4eae71f Michael Hanselmann
      self.cfg.Update(cluster, feedback_fn)
2204 ec85e3d5 Iustin Pop
2205 ec85e3d5 Iustin Pop
      # update the known hosts file
2206 ec85e3d5 Iustin Pop
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2207 ec85e3d5 Iustin Pop
      node_list = self.cfg.GetNodeList()
2208 ec85e3d5 Iustin Pop
      try:
2209 ec85e3d5 Iustin Pop
        node_list.remove(master)
2210 ec85e3d5 Iustin Pop
      except ValueError:
2211 ec85e3d5 Iustin Pop
        pass
2212 ec85e3d5 Iustin Pop
      result = self.rpc.call_upload_file(node_list,
2213 ec85e3d5 Iustin Pop
                                         constants.SSH_KNOWN_HOSTS_FILE)
2214 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
2215 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2216 6f7d4e75 Iustin Pop
        if msg:
2217 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2218 6f7d4e75 Iustin Pop
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
2219 6f7d4e75 Iustin Pop
          self.proc.LogWarning(msg)
2220 ec85e3d5 Iustin Pop
2221 07bd8a51 Iustin Pop
    finally:
2222 3583908a Guido Trotter
      result = self.rpc.call_node_start_master(master, False, False)
2223 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2224 b726aff0 Iustin Pop
      if msg:
2225 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
2226 b726aff0 Iustin Pop
                        " the master, please restart manually: %s", msg)
2227 07bd8a51 Iustin Pop
2228 07bd8a51 Iustin Pop
2229 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
2230 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
2231 8084f9f6 Manuel Franceschini

2232 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
2233 e4376078 Iustin Pop
  @param disk: the disk to check
2234 5bbd3f7f Michael Hanselmann
  @rtype: boolean
2235 e4376078 Iustin Pop
  @return: boolean indicating whether a LD_LV dev_type was found or not
2236 8084f9f6 Manuel Franceschini

2237 8084f9f6 Manuel Franceschini
  """
2238 8084f9f6 Manuel Franceschini
  if disk.children:
2239 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
2240 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
2241 8084f9f6 Manuel Franceschini
        return True
2242 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
2243 8084f9f6 Manuel Franceschini
2244 8084f9f6 Manuel Franceschini
2245 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
2246 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
2247 8084f9f6 Manuel Franceschini

2248 8084f9f6 Manuel Franceschini
  """
2249 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
2250 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
2251 8084f9f6 Manuel Franceschini
  _OP_REQP = []
2252 c53279cf Guido Trotter
  REQ_BGL = False
2253 c53279cf Guido Trotter
2254 3994f455 Iustin Pop
  def CheckArguments(self):
2255 4b7735f9 Iustin Pop
    """Check parameters
2256 4b7735f9 Iustin Pop

2257 4b7735f9 Iustin Pop
    """
2258 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
2259 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
2260 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
2261 4b7735f9 Iustin Pop
      try:
2262 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
2263 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
2264 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
2265 5c983ee5 Iustin Pop
                                   str(err), errors.ECODE_INVAL)
2266 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
2267 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed",
2268 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2269 1338f2b4 Balazs Lecz
2270 3953242f Iustin Pop
    _CheckBooleanOpField(self.op, "maintain_node_health")
2271 4b7735f9 Iustin Pop
2272 1338f2b4 Balazs Lecz
    if self.op.uid_pool:
2273 1338f2b4 Balazs Lecz
      uidpool.CheckUidPool(self.op.uid_pool)
2274 1338f2b4 Balazs Lecz
2275 fdad8c4d Balazs Lecz
    if self.op.add_uids:
2276 fdad8c4d Balazs Lecz
      uidpool.CheckUidPool(self.op.add_uids)
2277 fdad8c4d Balazs Lecz
2278 fdad8c4d Balazs Lecz
    if self.op.remove_uids:
2279 fdad8c4d Balazs Lecz
      uidpool.CheckUidPool(self.op.remove_uids)
2280 fdad8c4d Balazs Lecz
2281 c53279cf Guido Trotter
  def ExpandNames(self):
2282 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
2283 c53279cf Guido Trotter
    # all nodes to be modified.
2284 c53279cf Guido Trotter
    self.needed_locks = {
2285 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
2286 c53279cf Guido Trotter
    }
2287 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2288 8084f9f6 Manuel Franceschini
2289 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
2290 8084f9f6 Manuel Franceschini
    """Build hooks env.
2291 8084f9f6 Manuel Franceschini

2292 8084f9f6 Manuel Franceschini
    """
2293 8084f9f6 Manuel Franceschini
    env = {
2294 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
2295 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
2296 8084f9f6 Manuel Franceschini
      }
2297 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
2298 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
2299 8084f9f6 Manuel Franceschini
2300 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
2301 8084f9f6 Manuel Franceschini
    """Check prerequisites.
2302 8084f9f6 Manuel Franceschini

2303 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
2304 5f83e263 Iustin Pop
    if the given volume group is valid.
2305 8084f9f6 Manuel Franceschini

2306 8084f9f6 Manuel Franceschini
    """
2307 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
2308 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
2309 8084f9f6 Manuel Franceschini
      for inst in instances:
2310 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
2311 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
2312 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
2313 5c983ee5 Iustin Pop
                                       " lvm-based instances exist",
2314 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
2315 8084f9f6 Manuel Franceschini
2316 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
2317 779c15bb Iustin Pop
2318 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
2319 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
2320 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
2321 8084f9f6 Manuel Franceschini
      for node in node_list:
2322 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
2323 e480923b Iustin Pop
        if msg:
2324 781de953 Iustin Pop
          # ignoring down node
2325 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
2326 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
2327 781de953 Iustin Pop
          continue
2328 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2329 781de953 Iustin Pop
                                              self.op.vg_name,
2330 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
2331 8084f9f6 Manuel Franceschini
        if vgstatus:
2332 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
2333 5c983ee5 Iustin Pop
                                     (node, vgstatus), errors.ECODE_ENVIRON)
2334 8084f9f6 Manuel Franceschini
2335 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
2336 5af3da74 Guido Trotter
    # validate params changes
2337 779c15bb Iustin Pop
    if self.op.beparams:
2338 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2339 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
2340 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
2341 779c15bb Iustin Pop
2342 5af3da74 Guido Trotter
    if self.op.nicparams:
2343 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2344 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
2345 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
2346 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
2347 90b704a1 Guido Trotter
      nic_errors = []
2348 90b704a1 Guido Trotter
2349 90b704a1 Guido Trotter
      # check all instances for consistency
2350 90b704a1 Guido Trotter
      for instance in self.cfg.GetAllInstancesInfo().values():
2351 90b704a1 Guido Trotter
        for nic_idx, nic in enumerate(instance.nics):
2352 90b704a1 Guido Trotter
          params_copy = copy.deepcopy(nic.nicparams)
2353 90b704a1 Guido Trotter
          params_filled = objects.FillDict(self.new_nicparams, params_copy)
2354 90b704a1 Guido Trotter
2355 90b704a1 Guido Trotter
          # check parameter syntax
2356 90b704a1 Guido Trotter
          try:
2357 90b704a1 Guido Trotter
            objects.NIC.CheckParameterSyntax(params_filled)
2358 90b704a1 Guido Trotter
          except errors.ConfigurationError, err:
2359 90b704a1 Guido Trotter
            nic_errors.append("Instance %s, nic/%d: %s" %
2360 90b704a1 Guido Trotter
                              (instance.name, nic_idx, err))
2361 90b704a1 Guido Trotter
2362 90b704a1 Guido Trotter
          # if we're moving instances to routed, check that they have an ip
2363 90b704a1 Guido Trotter
          target_mode = params_filled[constants.NIC_MODE]
2364 90b704a1 Guido Trotter
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2365 90b704a1 Guido Trotter
            nic_errors.append("Instance %s, nic/%d: routed nick with no ip" %
2366 90b704a1 Guido Trotter
                              (instance.name, nic_idx))
2367 90b704a1 Guido Trotter
      if nic_errors:
2368 90b704a1 Guido Trotter
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2369 90b704a1 Guido Trotter
                                   "\n".join(nic_errors))
2370 5af3da74 Guido Trotter
2371 779c15bb Iustin Pop
    # hypervisor list/parameters
2372 9f3ac970 Iustin Pop
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2373 779c15bb Iustin Pop
    if self.op.hvparams:
2374 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
2375 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input",
2376 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2377 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
2378 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
2379 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
2380 779c15bb Iustin Pop
        else:
2381 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
2382 779c15bb Iustin Pop
2383 17463d22 Renรฉ Nussbaumer
    # os hypervisor parameters
2384 17463d22 Renรฉ Nussbaumer
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2385 17463d22 Renรฉ Nussbaumer
    if self.op.os_hvp:
2386 17463d22 Renรฉ Nussbaumer
      if not isinstance(self.op.os_hvp, dict):
2387 17463d22 Renรฉ Nussbaumer
        raise errors.OpPrereqError("Invalid 'os_hvp' parameter on input",
2388 17463d22 Renรฉ Nussbaumer
                                   errors.ECODE_INVAL)
2389 17463d22 Renรฉ Nussbaumer
      for os_name, hvs in self.op.os_hvp.items():
2390 17463d22 Renรฉ Nussbaumer
        if not isinstance(hvs, dict):
2391 17463d22 Renรฉ Nussbaumer
          raise errors.OpPrereqError(("Invalid 'os_hvp' parameter on"
2392 17463d22 Renรฉ Nussbaumer
                                      " input"), errors.ECODE_INVAL)
2393 17463d22 Renรฉ Nussbaumer
        if os_name not in self.new_os_hvp:
2394 17463d22 Renรฉ Nussbaumer
          self.new_os_hvp[os_name] = hvs
2395 17463d22 Renรฉ Nussbaumer
        else:
2396 17463d22 Renรฉ Nussbaumer
          for hv_name, hv_dict in hvs.items():
2397 17463d22 Renรฉ Nussbaumer
            if hv_name not in self.new_os_hvp[os_name]:
2398 17463d22 Renรฉ Nussbaumer
              self.new_os_hvp[os_name][hv_name] = hv_dict
2399 17463d22 Renรฉ Nussbaumer
            else:
2400 17463d22 Renรฉ Nussbaumer
              self.new_os_hvp[os_name][hv_name].update(hv_dict)
2401 17463d22 Renรฉ Nussbaumer
2402 9f3ac970 Iustin Pop
    # changes to the hypervisor list
2403 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
2404 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
2405 b119bccb Guido Trotter
      if not self.hv_list:
2406 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
2407 5c983ee5 Iustin Pop
                                   " least one member",
2408 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2409 b119bccb Guido Trotter
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
2410 b119bccb Guido Trotter
      if invalid_hvs:
2411 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
2412 ab3e6da8 Iustin Pop
                                   " entries: %s" %
2413 ab3e6da8 Iustin Pop
                                   utils.CommaJoin(invalid_hvs),
2414 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2415 9f3ac970 Iustin Pop
      for hv in self.hv_list:
2416 9f3ac970 Iustin Pop
        # if the hypervisor doesn't already exist in the cluster
2417 9f3ac970 Iustin Pop
        # hvparams, we initialize it to empty, and then (in both
2418 9f3ac970 Iustin Pop
        # cases) we make sure to fill the defaults, as we might not
2419 9f3ac970 Iustin Pop
        # have a complete defaults list if the hypervisor wasn't
2420 9f3ac970 Iustin Pop
        # enabled before
2421 9f3ac970 Iustin Pop
        if hv not in new_hvp:
2422 9f3ac970 Iustin Pop
          new_hvp[hv] = {}
2423 9f3ac970 Iustin Pop
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2424 9f3ac970 Iustin Pop
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2425 779c15bb Iustin Pop
    else:
2426 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
2427 779c15bb Iustin Pop
2428 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
2429 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
2430 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
2431 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
2432 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
2433 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
2434 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
2435 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
2436 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2437 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
2438 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
2439 779c15bb Iustin Pop
2440 cced4c39 Iustin Pop
    if self.op.os_hvp:
2441 cced4c39 Iustin Pop
      # no need to check any newly-enabled hypervisors, since the
2442 cced4c39 Iustin Pop
      # defaults have already been checked in the above code-block
2443 cced4c39 Iustin Pop
      for os_name, os_hvp in self.new_os_hvp.items():
2444 cced4c39 Iustin Pop
        for hv_name, hv_params in os_hvp.items():
2445 cced4c39 Iustin Pop
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2446 cced4c39 Iustin Pop
          # we need to fill in the new os_hvp on top of the actual hv_p
2447 cced4c39 Iustin Pop
          cluster_defaults = self.new_hvparams.get(hv_name, {})
2448 cced4c39 Iustin Pop
          new_osp = objects.FillDict(cluster_defaults, hv_params)
2449 cced4c39 Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
2450 cced4c39 Iustin Pop
          hv_class.CheckParameterSyntax(new_osp)
2451 cced4c39 Iustin Pop
          _CheckHVParams(self, node_list, hv_name, new_osp)
2452 cced4c39 Iustin Pop
2453 cced4c39 Iustin Pop
2454 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
2455 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
2456 8084f9f6 Manuel Franceschini

2457 8084f9f6 Manuel Franceschini
    """
2458 779c15bb Iustin Pop
    if self.op.vg_name is not None:
2459 b2482333 Guido Trotter
      new_volume = self.op.vg_name
2460 b2482333 Guido Trotter
      if not new_volume:
2461 b2482333 Guido Trotter
        new_volume = None
2462 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
2463 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
2464 779c15bb Iustin Pop
      else:
2465 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
2466 779c15bb Iustin Pop
                    " state, not changing")
2467 779c15bb Iustin Pop
    if self.op.hvparams:
2468 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
2469 17463d22 René Nussbaumer
    if self.op.os_hvp:
2470 17463d22 René Nussbaumer
      self.cluster.os_hvp = self.new_os_hvp
2471 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
2472 9f3ac970 Iustin Pop
      self.cluster.hvparams = self.new_hvparams
2473 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2474 779c15bb Iustin Pop
    if self.op.beparams:
2475 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2476 5af3da74 Guido Trotter
    if self.op.nicparams:
2477 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2478 5af3da74 Guido Trotter
2479 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
2480 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
2481 75e914fb Iustin Pop
      # we need to update the pool size here, otherwise the save will fail
2482 44485f49 Guido Trotter
      _AdjustCandidatePool(self, [])
2483 4b7735f9 Iustin Pop
2484 3953242f Iustin Pop
    if self.op.maintain_node_health is not None:
2485 3953242f Iustin Pop
      self.cluster.maintain_node_health = self.op.maintain_node_health
2486 3953242f Iustin Pop
2487 fdad8c4d Balazs Lecz
    if self.op.add_uids is not None:
2488 fdad8c4d Balazs Lecz
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
2489 fdad8c4d Balazs Lecz
2490 fdad8c4d Balazs Lecz
    if self.op.remove_uids is not None:
2491 fdad8c4d Balazs Lecz
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
2492 fdad8c4d Balazs Lecz
2493 1338f2b4 Balazs Lecz
    if self.op.uid_pool is not None:
2494 1338f2b4 Balazs Lecz
      self.cluster.uid_pool = self.op.uid_pool
2495 1338f2b4 Balazs Lecz
2496 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cluster, feedback_fn)
2497 8084f9f6 Manuel Franceschini
2498 8084f9f6 Manuel Franceschini
2499 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
2500 28eddce5 Guido Trotter
  """Distribute additional files which are part of the cluster configuration.
2501 28eddce5 Guido Trotter

2502 28eddce5 Guido Trotter
  ConfigWriter takes care of distributing the config and ssconf files, but
2503 28eddce5 Guido Trotter
  there are more files which should be distributed to all nodes. This function
2504 28eddce5 Guido Trotter
  makes sure those are copied.
2505 28eddce5 Guido Trotter

2506 28eddce5 Guido Trotter
  @param lu: calling logical unit
2507 28eddce5 Guido Trotter
  @param additional_nodes: list of nodes not in the config to distribute to
2508 28eddce5 Guido Trotter

2509 28eddce5 Guido Trotter
  """
2510 28eddce5 Guido Trotter
  # 1. Gather target nodes
2511 28eddce5 Guido Trotter
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
2512 6819dc49 Iustin Pop
  dist_nodes = lu.cfg.GetOnlineNodeList()
2513 28eddce5 Guido Trotter
  if additional_nodes is not None:
2514 28eddce5 Guido Trotter
    dist_nodes.extend(additional_nodes)
2515 28eddce5 Guido Trotter
  if myself.name in dist_nodes:
2516 28eddce5 Guido Trotter
    dist_nodes.remove(myself.name)
2517 a4eae71f Michael Hanselmann
2518 28eddce5 Guido Trotter
  # 2. Gather files to distribute
2519 28eddce5 Guido Trotter
  dist_files = set([constants.ETC_HOSTS,
2520 28eddce5 Guido Trotter
                    constants.SSH_KNOWN_HOSTS_FILE,
2521 28eddce5 Guido Trotter
                    constants.RAPI_CERT_FILE,
2522 28eddce5 Guido Trotter
                    constants.RAPI_USERS_FILE,
2523 6b7d5878 Michael Hanselmann
                    constants.CONFD_HMAC_KEY,
2524 28eddce5 Guido Trotter
                   ])
2525 e1b8653f Guido Trotter
2526 e1b8653f Guido Trotter
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2527 e1b8653f Guido Trotter
  for hv_name in enabled_hypervisors:
2528 e1b8653f Guido Trotter
    hv_class = hypervisor.GetHypervisor(hv_name)
2529 e1b8653f Guido Trotter
    dist_files.update(hv_class.GetAncillaryFiles())
2530 e1b8653f Guido Trotter
2531 28eddce5 Guido Trotter
  # 3. Perform the files upload
2532 28eddce5 Guido Trotter
  for fname in dist_files:
2533 28eddce5 Guido Trotter
    if os.path.exists(fname):
2534 28eddce5 Guido Trotter
      result = lu.rpc.call_upload_file(dist_nodes, fname)
2535 28eddce5 Guido Trotter
      for to_node, to_result in result.items():
2536 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2537 6f7d4e75 Iustin Pop
        if msg:
2538 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2539 6f7d4e75 Iustin Pop
                 (fname, to_node, msg))
2540 6f7d4e75 Iustin Pop
          lu.proc.LogWarning(msg)
2541 28eddce5 Guido Trotter
2542 28eddce5 Guido Trotter
2543 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
2544 afee0879 Iustin Pop
  """Force the redistribution of cluster configuration.
2545 afee0879 Iustin Pop

2546 afee0879 Iustin Pop
  This is a very simple LU.
2547 afee0879 Iustin Pop

2548 afee0879 Iustin Pop
  """
2549 afee0879 Iustin Pop
  _OP_REQP = []
2550 afee0879 Iustin Pop
  REQ_BGL = False
2551 afee0879 Iustin Pop
2552 afee0879 Iustin Pop
  def ExpandNames(self):
2553 afee0879 Iustin Pop
    self.needed_locks = {
2554 afee0879 Iustin Pop
      locking.LEVEL_NODE: locking.ALL_SET,
2555 afee0879 Iustin Pop
    }
2556 afee0879 Iustin Pop
    self.share_locks[locking.LEVEL_NODE] = 1
2557 afee0879 Iustin Pop
2558 afee0879 Iustin Pop
  def CheckPrereq(self):
2559 afee0879 Iustin Pop
    """Check prerequisites.
2560 afee0879 Iustin Pop

2561 afee0879 Iustin Pop
    """
2562 afee0879 Iustin Pop
2563 afee0879 Iustin Pop
  def Exec(self, feedback_fn):
2564 afee0879 Iustin Pop
    """Redistribute the configuration.
2565 afee0879 Iustin Pop

2566 afee0879 Iustin Pop
    """
2567 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
2568 28eddce5 Guido Trotter
    _RedistributeAncillaryFiles(self)
2569 afee0879 Iustin Pop
2570 afee0879 Iustin Pop
2571 b6c07b79 Michael Hanselmann
def _WaitForSync(lu, instance, oneshot=False):
2572 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
2573 a8083063 Iustin Pop

2574 a8083063 Iustin Pop
  """
2575 a8083063 Iustin Pop
  if not instance.disks:
2576 a8083063 Iustin Pop
    return True
2577 a8083063 Iustin Pop
2578 a8083063 Iustin Pop
  if not oneshot:
2579 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2580 a8083063 Iustin Pop
2581 a8083063 Iustin Pop
  node = instance.primary_node
2582 a8083063 Iustin Pop
2583 a8083063 Iustin Pop
  for dev in instance.disks:
2584 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
2585 a8083063 Iustin Pop
2586 6bcb1446 Michael Hanselmann
  # TODO: Convert to utils.Retry
2587 6bcb1446 Michael Hanselmann
2588 a8083063 Iustin Pop
  retries = 0
2589 fbafd7a8 Iustin Pop
  degr_retries = 10 # in seconds, as we sleep 1 second each time
2590 a8083063 Iustin Pop
  while True:
2591 a8083063 Iustin Pop
    max_time = 0
2592 a8083063 Iustin Pop
    done = True
2593 a8083063 Iustin Pop
    cumul_degraded = False
2594 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
2595 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2596 3efa9051 Iustin Pop
    if msg:
2597 3efa9051 Iustin Pop
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2598 a8083063 Iustin Pop
      retries += 1
2599 a8083063 Iustin Pop
      if retries >= 10:
2600 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
2601 3ecf6786 Iustin Pop
                                 " aborting." % node)
2602 a8083063 Iustin Pop
      time.sleep(6)
2603 a8083063 Iustin Pop
      continue
2604 3efa9051 Iustin Pop
    rstats = rstats.payload
2605 a8083063 Iustin Pop
    retries = 0
2606 1492cca7 Iustin Pop
    for i, mstat in enumerate(rstats):
2607 a8083063 Iustin Pop
      if mstat is None:
2608 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
2609 86d9d3bb Iustin Pop
                      node, instance.disks[i].iv_name)
2610 a8083063 Iustin Pop
        continue
2611 36145b12 Michael Hanselmann
2612 36145b12 Michael Hanselmann
      cumul_degraded = (cumul_degraded or
2613 36145b12 Michael Hanselmann
                        (mstat.is_degraded and mstat.sync_percent is None))
2614 36145b12 Michael Hanselmann
      if mstat.sync_percent is not None:
2615 a8083063 Iustin Pop
        done = False
2616 36145b12 Michael Hanselmann
        if mstat.estimated_time is not None:
2617 36145b12 Michael Hanselmann
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
2618 36145b12 Michael Hanselmann
          max_time = mstat.estimated_time
2619 a8083063 Iustin Pop
        else:
2620 a8083063 Iustin Pop
          rem_time = "no time estimate"
2621 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
2622 4d4a651d Michael Hanselmann
                        (instance.disks[i].iv_name, mstat.sync_percent,
2623 4d4a651d Michael Hanselmann
                         rem_time))
2624 fbafd7a8 Iustin Pop
2625 fbafd7a8 Iustin Pop
    # if we're done but degraded, let's do a few small retries, to
2626 fbafd7a8 Iustin Pop
    # make sure we see a stable and not transient situation; therefore
2627 fbafd7a8 Iustin Pop
    # we force restart of the loop
2628 fbafd7a8 Iustin Pop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
2629 fbafd7a8 Iustin Pop
      logging.info("Degraded disks found, %d retries left", degr_retries)
2630 fbafd7a8 Iustin Pop
      degr_retries -= 1
2631 fbafd7a8 Iustin Pop
      time.sleep(1)
2632 fbafd7a8 Iustin Pop
      continue
2633 fbafd7a8 Iustin Pop
2634 a8083063 Iustin Pop
    if done or oneshot:
2635 a8083063 Iustin Pop
      break
2636 a8083063 Iustin Pop
2637 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
2638 a8083063 Iustin Pop
2639 a8083063 Iustin Pop
  if done:
2640 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
2641 a8083063 Iustin Pop
  return not cumul_degraded
2642 a8083063 Iustin Pop
2643 a8083063 Iustin Pop
2644 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
2645 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
2646 a8083063 Iustin Pop

2647 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
2648 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
2649 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
2650 0834c866 Iustin Pop

2651 a8083063 Iustin Pop
  """
2652 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
2653 a8083063 Iustin Pop
2654 a8083063 Iustin Pop
  result = True
2655 96acbc09 Michael Hanselmann
2656 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
2657 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
2658 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2659 23829f6f Iustin Pop
    if msg:
2660 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
2661 23829f6f Iustin Pop
      result = False
2662 23829f6f Iustin Pop
    elif not rstats.payload:
2663 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
2664 a8083063 Iustin Pop
      result = False
2665 a8083063 Iustin Pop
    else:
2666 96acbc09 Michael Hanselmann
      if ldisk:
2667 f208978a Michael Hanselmann
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
2668 96acbc09 Michael Hanselmann
      else:
2669 96acbc09 Michael Hanselmann
        result = result and not rstats.payload.is_degraded
2670 96acbc09 Michael Hanselmann
2671 a8083063 Iustin Pop
  if dev.children:
2672 a8083063 Iustin Pop
    for child in dev.children:
2673 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
2674 a8083063 Iustin Pop
2675 a8083063 Iustin Pop
  return result
2676 a8083063 Iustin Pop
2677 a8083063 Iustin Pop
2678 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
2679 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
2680 a8083063 Iustin Pop

2681 a8083063 Iustin Pop
  """
2682 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2683 6bf01bbb Guido Trotter
  REQ_BGL = False
2684 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
2685 1e288a26 Guido Trotter
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
2686 1e288a26 Guido Trotter
  # Fields that need calculation of global os validity
2687 1e288a26 Guido Trotter
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])
2688 a8083063 Iustin Pop
2689 6bf01bbb Guido Trotter
  def ExpandNames(self):
2690 1f9430d6 Iustin Pop
    if self.op.names:
2691 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported",
2692 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2693 1f9430d6 Iustin Pop
2694 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2695 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2696 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
2697 1f9430d6 Iustin Pop
2698 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
2699 a6ab004b Iustin Pop
    # Temporary removal of locks, should be reverted later
2700 a6ab004b Iustin Pop
    # TODO: reintroduce locks when they are lighter-weight
2701 6bf01bbb Guido Trotter
    self.needed_locks = {}
2702 a6ab004b Iustin Pop
    #self.share_locks[locking.LEVEL_NODE] = 1
2703 a6ab004b Iustin Pop
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2704 6bf01bbb Guido Trotter
2705 6bf01bbb Guido Trotter
  def CheckPrereq(self):
2706 6bf01bbb Guido Trotter
    """Check prerequisites.
2707 6bf01bbb Guido Trotter

2708 6bf01bbb Guido Trotter
    """
2709 6bf01bbb Guido Trotter
2710 1f9430d6 Iustin Pop
  @staticmethod
2711 857121ad Iustin Pop
  def _DiagnoseByOS(rlist):
2712 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
2713 1f9430d6 Iustin Pop

2714 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
2715 1f9430d6 Iustin Pop

2716 e4376078 Iustin Pop
    @rtype: dict
2717 5fcc718f Iustin Pop
    @return: a dictionary with osnames as keys and as value another map, with
2718 255dcebd Iustin Pop
        nodes as keys and tuples of (path, status, diagnose) as values, eg::
2719 e4376078 Iustin Pop

2720 255dcebd Iustin Pop
          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
2721 255dcebd Iustin Pop
                                     (/srv/..., False, "invalid api")],
2722 255dcebd Iustin Pop
                           "node2": [(/srv/..., True, "")]}
2723 e4376078 Iustin Pop
          }
2724 1f9430d6 Iustin Pop

2725 1f9430d6 Iustin Pop
    """
2726 1f9430d6 Iustin Pop
    all_os = {}
2727 a6ab004b Iustin Pop
    # we build here the list of nodes that didn't fail the RPC (at RPC
2728 a6ab004b Iustin Pop
    # level), so that nodes with a non-responding node daemon don't
2729 a6ab004b Iustin Pop
    # make all OSes invalid
2730 a6ab004b Iustin Pop
    good_nodes = [node_name for node_name in rlist
2731 4c4e4e1e Iustin Pop
                  if not rlist[node_name].fail_msg]
2732 83d92ad8 Iustin Pop
    for node_name, nr in rlist.items():
2733 4c4e4e1e Iustin Pop
      if nr.fail_msg or not nr.payload:
2734 1f9430d6 Iustin Pop
        continue
2735 ba00557a Guido Trotter
      for name, path, status, diagnose, variants in nr.payload:
2736 255dcebd Iustin Pop
        if name not in all_os:
2737 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
2738 1f9430d6 Iustin Pop
          # for each node in node_list
2739 255dcebd Iustin Pop
          all_os[name] = {}
2740 a6ab004b Iustin Pop
          for nname in good_nodes:
2741 255dcebd Iustin Pop
            all_os[name][nname] = []
2742 ba00557a Guido Trotter
        all_os[name][node_name].append((path, status, diagnose, variants))
2743 1f9430d6 Iustin Pop
    return all_os
2744 a8083063 Iustin Pop
2745 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2746 a8083063 Iustin Pop
    """Compute the list of OSes.
2747 a8083063 Iustin Pop

2748 a8083063 Iustin Pop
    """
2749 a6ab004b Iustin Pop
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
2750 94a02bb5 Iustin Pop
    node_data = self.rpc.call_os_diagnose(valid_nodes)
2751 857121ad Iustin Pop
    pol = self._DiagnoseByOS(node_data)
2752 1f9430d6 Iustin Pop
    output = []
2753 1e288a26 Guido Trotter
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
2754 1e288a26 Guido Trotter
    calc_variants = "variants" in self.op.output_fields
2755 1e288a26 Guido Trotter
2756 83d92ad8 Iustin Pop
    for os_name, os_data in pol.items():
2757 1f9430d6 Iustin Pop
      row = []
2758 1e288a26 Guido Trotter
      if calc_valid:
2759 1e288a26 Guido Trotter
        valid = True
2760 1e288a26 Guido Trotter
        variants = None
2761 1e288a26 Guido Trotter
        for osl in os_data.values():
2762 1e288a26 Guido Trotter
          valid = valid and osl and osl[0][1]
2763 1e288a26 Guido Trotter
          if not valid:
2764 1e288a26 Guido Trotter
            variants = None
2765 1e288a26 Guido Trotter
            break
2766 1e288a26 Guido Trotter
          if calc_variants:
2767 1e288a26 Guido Trotter
            node_variants = osl[0][3]
2768 1e288a26 Guido Trotter
            if variants is None:
2769 1e288a26 Guido Trotter
              variants = node_variants
2770 1e288a26 Guido Trotter
            else:
2771 1e288a26 Guido Trotter
              variants = [v for v in variants if v in node_variants]
2772 1e288a26 Guido Trotter
2773 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
2774 1f9430d6 Iustin Pop
        if field == "name":
2775 1f9430d6 Iustin Pop
          val = os_name
2776 1f9430d6 Iustin Pop
        elif field == "valid":
2777 1e288a26 Guido Trotter
          val = valid
2778 1f9430d6 Iustin Pop
        elif field == "node_status":
2779 255dcebd Iustin Pop
          # this is just a copy of the dict
2780 1f9430d6 Iustin Pop
          val = {}
2781 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
2782 255dcebd Iustin Pop
            val[node_name] = nos_list
2783 1e288a26 Guido Trotter
        elif field == "variants":
2784 1e288a26 Guido Trotter
          val = variants
2785 1f9430d6 Iustin Pop
        else:
2786 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
2787 1f9430d6 Iustin Pop
        row.append(val)
2788 1f9430d6 Iustin Pop
      output.append(row)
2789 1f9430d6 Iustin Pop
2790 1f9430d6 Iustin Pop
    return output
2791 a8083063 Iustin Pop
2792 a8083063 Iustin Pop
2793 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
2794 a8083063 Iustin Pop
  """Logical unit for removing a node.
2795 a8083063 Iustin Pop

2796 a8083063 Iustin Pop
  """
2797 a8083063 Iustin Pop
  HPATH = "node-remove"
2798 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2799 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2800 a8083063 Iustin Pop
2801 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2802 a8083063 Iustin Pop
    """Build hooks env.
2803 a8083063 Iustin Pop

2804 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
2805 d08869ee Guido Trotter
    node would then be impossible to remove.
2806 a8083063 Iustin Pop

2807 a8083063 Iustin Pop
    """
2808 396e1b78 Michael Hanselmann
    env = {
2809 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2810 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
2811 396e1b78 Michael Hanselmann
      }
2812 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2813 9bb31ea8 Iustin Pop
    try:
2814 cd46f3b4 Luca Bigliardi
      all_nodes.remove(self.op.node_name)
2815 9bb31ea8 Iustin Pop
    except ValueError:
2816 9bb31ea8 Iustin Pop
      logging.warning("Node %s which is about to be removed not found"
2817 9bb31ea8 Iustin Pop
                      " in the all nodes list", self.op.node_name)
2818 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
2819 a8083063 Iustin Pop
2820 a8083063 Iustin Pop
  def CheckPrereq(self):
2821 a8083063 Iustin Pop
    """Check prerequisites.
2822 a8083063 Iustin Pop

2823 a8083063 Iustin Pop
    This checks:
2824 a8083063 Iustin Pop
     - the node exists in the configuration
2825 a8083063 Iustin Pop
     - it does not have primary or secondary instances
2826 a8083063 Iustin Pop
     - it's not the master
2827 a8083063 Iustin Pop

2828 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2829 a8083063 Iustin Pop

2830 a8083063 Iustin Pop
    """
2831 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
2832 cf26a87a Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.node_name)
2833 cf26a87a Iustin Pop
    assert node is not None
2834 a8083063 Iustin Pop
2835 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2836 a8083063 Iustin Pop
2837 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
2838 a8083063 Iustin Pop
    if node.name == masternode:
2839 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
2840 5c983ee5 Iustin Pop
                                 " you need to failover first.",
2841 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2842 a8083063 Iustin Pop
2843 a8083063 Iustin Pop
    for instance_name in instance_list:
2844 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
2845 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
2846 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
2847 5c983ee5 Iustin Pop
                                   " please remove first." % instance_name,
2848 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2849 a8083063 Iustin Pop
    self.op.node_name = node.name
2850 a8083063 Iustin Pop
    self.node = node
2851 a8083063 Iustin Pop
2852 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2853 a8083063 Iustin Pop
    """Removes the node from the cluster.
2854 a8083063 Iustin Pop

2855 a8083063 Iustin Pop
    """
2856 a8083063 Iustin Pop
    node = self.node
2857 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
2858 9a4f63d1 Iustin Pop
                 node.name)
2859 a8083063 Iustin Pop
2860 b989b9d9 Ken Wehr
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
2861 b989b9d9 Ken Wehr
2862 44485f49 Guido Trotter
    # Promote nodes to master candidate as needed
2863 44485f49 Guido Trotter
    _AdjustCandidatePool(self, exceptions=[node.name])
2864 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
2865 a8083063 Iustin Pop
2866 cd46f3b4 Luca Bigliardi
    # Run post hooks on the node before it's removed
2867 cd46f3b4 Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
2868 cd46f3b4 Luca Bigliardi
    try:
2869 1122eb25 Iustin Pop
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
2870 3cb5c1e3 Luca Bigliardi
    except:
2871 7260cfbe Iustin Pop
      # pylint: disable-msg=W0702
2872 3cb5c1e3 Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
2873 cd46f3b4 Luca Bigliardi
2874 b989b9d9 Ken Wehr
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
2875 4c4e4e1e Iustin Pop
    msg = result.fail_msg
2876 0623d351 Iustin Pop
    if msg:
2877 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
2878 0623d351 Iustin Pop
                      " the cluster: %s", msg)
2879 c8a0948f Michael Hanselmann
2880 a8083063 Iustin Pop
2881 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
2882 a8083063 Iustin Pop
  """Logical unit for querying nodes.
2883 a8083063 Iustin Pop

2884 a8083063 Iustin Pop
  """
2885 7260cfbe Iustin Pop
  # pylint: disable-msg=W0142
2886 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
2887 35705d8f Guido Trotter
  REQ_BGL = False
2888 19bed813 Iustin Pop
2889 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
2890 19bed813 Iustin Pop
                    "master_candidate", "offline", "drained"]
2891 19bed813 Iustin Pop
2892 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
2893 31bf511f Iustin Pop
    "dtotal", "dfree",
2894 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
2895 31bf511f Iustin Pop
    "bootid",
2896 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
2897 31bf511f Iustin Pop
    )
2898 31bf511f Iustin Pop
2899 19bed813 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*[
2900 19bed813 Iustin Pop
    "pinst_cnt", "sinst_cnt",
2901 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
2902 31bf511f Iustin Pop
    "pip", "sip", "tags",
2903 0e67cdbe Iustin Pop
    "master",
2904 19bed813 Iustin Pop
    "role"] + _SIMPLE_FIELDS
2905 31bf511f Iustin Pop
    )
2906 a8083063 Iustin Pop
2907 35705d8f Guido Trotter
  def ExpandNames(self):
2908 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2909 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2910 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2911 a8083063 Iustin Pop
2912 35705d8f Guido Trotter
    self.needed_locks = {}
2913 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2914 c8d8b4c8 Iustin Pop
2915 c8d8b4c8 Iustin Pop
    if self.op.names:
2916 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
2917 35705d8f Guido Trotter
    else:
2918 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
2919 c8d8b4c8 Iustin Pop
2920 bc8e4a1a Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2921 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
2922 c8d8b4c8 Iustin Pop
    if self.do_locking:
2923 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
2924 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
2925 c8d8b4c8 Iustin Pop
2926 35705d8f Guido Trotter
  def CheckPrereq(self):
2927 35705d8f Guido Trotter
    """Check prerequisites.
2928 35705d8f Guido Trotter

2929 35705d8f Guido Trotter
    """
2930 c8d8b4c8 Iustin Pop
    # The node list is validated by _GetWantedNodes when it is not empty;
2931 c8d8b4c8 Iustin Pop
    # an empty list needs no validation
2932 c8d8b4c8 Iustin Pop
    pass
2933 a8083063 Iustin Pop
2934 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2935 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2936 a8083063 Iustin Pop

2937 a8083063 Iustin Pop
    """
2938 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
2939 c8d8b4c8 Iustin Pop
    if self.do_locking:
2940 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2941 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2942 3fa93523 Guido Trotter
      nodenames = self.wanted
2943 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
2944 3fa93523 Guido Trotter
      if missing:
2945 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2946 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
2947 c8d8b4c8 Iustin Pop
    else:
2948 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
2949 c1f1cbb2 Iustin Pop
2950 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
2951 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
2952 a8083063 Iustin Pop
2953 a8083063 Iustin Pop
    # begin data gathering
2954 a8083063 Iustin Pop
2955 bc8e4a1a Iustin Pop
    if self.do_node_query:
2956 a8083063 Iustin Pop
      live_data = {}
2957 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2958 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2959 a8083063 Iustin Pop
      for name in nodenames:
2960 781de953 Iustin Pop
        nodeinfo = node_data[name]
2961 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2962 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2963 d599d686 Iustin Pop
          fn = utils.TryConvert
2964 a8083063 Iustin Pop
          live_data[name] = {
2965 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2966 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2967 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2968 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2969 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2970 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2971 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2972 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2973 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2974 a8083063 Iustin Pop
            }
2975 a8083063 Iustin Pop
        else:
2976 a8083063 Iustin Pop
          live_data[name] = {}
2977 a8083063 Iustin Pop
    else:
2978 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
2979 a8083063 Iustin Pop
2980 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
2981 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2982 a8083063 Iustin Pop
2983 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2984 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2985 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
2986 4dfd6266 Iustin Pop
      inst_data = self.cfg.GetAllInstancesInfo()
2987 a8083063 Iustin Pop
2988 1122eb25 Iustin Pop
      for inst in inst_data.values():
2989 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
2990 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
2991 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
2992 ec223efb Iustin Pop
          if secnode in node_to_secondary:
2993 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
2994 a8083063 Iustin Pop
2995 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
2996 0e67cdbe Iustin Pop
2997 a8083063 Iustin Pop
    # end data gathering
2998 a8083063 Iustin Pop
2999 a8083063 Iustin Pop
    output = []
3000 a8083063 Iustin Pop
    for node in nodelist:
3001 a8083063 Iustin Pop
      node_output = []
3002 a8083063 Iustin Pop
      for field in self.op.output_fields:
3003 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
3004 19bed813 Iustin Pop
          val = getattr(node, field)
3005 ec223efb Iustin Pop
        elif field == "pinst_list":
3006 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
3007 ec223efb Iustin Pop
        elif field == "sinst_list":
3008 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
3009 ec223efb Iustin Pop
        elif field == "pinst_cnt":
3010 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
3011 ec223efb Iustin Pop
        elif field == "sinst_cnt":
3012 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
3013 a8083063 Iustin Pop
        elif field == "pip":
3014 a8083063 Iustin Pop
          val = node.primary_ip
3015 a8083063 Iustin Pop
        elif field == "sip":
3016 a8083063 Iustin Pop
          val = node.secondary_ip
3017 130a6a6f Iustin Pop
        elif field == "tags":
3018 130a6a6f Iustin Pop
          val = list(node.GetTags())
3019 0e67cdbe Iustin Pop
        elif field == "master":
3020 0e67cdbe Iustin Pop
          val = node.name == master_node
3021 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
3022 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
3023 c120ff34 Iustin Pop
        elif field == "role":
3024 c120ff34 Iustin Pop
          if node.name == master_node:
3025 c120ff34 Iustin Pop
            val = "M"
3026 c120ff34 Iustin Pop
          elif node.master_candidate:
3027 c120ff34 Iustin Pop
            val = "C"
3028 c120ff34 Iustin Pop
          elif node.drained:
3029 c120ff34 Iustin Pop
            val = "D"
3030 c120ff34 Iustin Pop
          elif node.offline:
3031 c120ff34 Iustin Pop
            val = "O"
3032 c120ff34 Iustin Pop
          else:
3033 c120ff34 Iustin Pop
            val = "R"
3034 a8083063 Iustin Pop
        else:
3035 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
3036 a8083063 Iustin Pop
        node_output.append(val)
3037 a8083063 Iustin Pop
      output.append(node_output)
3038 a8083063 Iustin Pop
3039 a8083063 Iustin Pop
    return output
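
# Illustrative sketch (assumption, not part of the module): only fields in
# _FIELDS_DYNAMIC need the call_node_info RPC above; static fields are
# answered from the configuration, and node locks are taken only when
# use_locking is set and at least one non-static field is requested.  A
# typical opcode (class name assumed) would be:
#
#   op = opcodes.OpQueryNodes(names=[], use_locking=False,
#                             output_fields=["name", "pinst_cnt", "mfree"])
#
# "name" and "pinst_cnt" are static here, while "mfree" is dynamic.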
3040 a8083063 Iustin Pop
3041 a8083063 Iustin Pop
3042 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
3043 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
3044 dcb93971 Michael Hanselmann

3045 dcb93971 Michael Hanselmann
  """
3046 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
3047 21a15682 Guido Trotter
  REQ_BGL = False
3048 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3049 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
3050 21a15682 Guido Trotter
3051 21a15682 Guido Trotter
  def ExpandNames(self):
3052 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3053 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3054 21a15682 Guido Trotter
                       selected=self.op.output_fields)
3055 21a15682 Guido Trotter
3056 21a15682 Guido Trotter
    self.needed_locks = {}
3057 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
3058 21a15682 Guido Trotter
    if not self.op.nodes:
3059 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3060 21a15682 Guido Trotter
    else:
3061 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
3062 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
3063 dcb93971 Michael Hanselmann
3064 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
3065 dcb93971 Michael Hanselmann
    """Check prerequisites.
3066 dcb93971 Michael Hanselmann

3067 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
3068 dcb93971 Michael Hanselmann

3069 dcb93971 Michael Hanselmann
    """
3070 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3071 dcb93971 Michael Hanselmann
3072 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
3073 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
3074 dcb93971 Michael Hanselmann

3075 dcb93971 Michael Hanselmann
    """
3076 a7ba5e53 Iustin Pop
    nodenames = self.nodes
3077 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
3078 dcb93971 Michael Hanselmann
3079 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
3080 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
3081 dcb93971 Michael Hanselmann
3082 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
3083 dcb93971 Michael Hanselmann
3084 dcb93971 Michael Hanselmann
    output = []
3085 dcb93971 Michael Hanselmann
    for node in nodenames:
3086 10bfe6cb Iustin Pop
      nresult = volumes[node]
3087 10bfe6cb Iustin Pop
      if nresult.offline:
3088 10bfe6cb Iustin Pop
        continue
3089 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
3090 10bfe6cb Iustin Pop
      if msg:
3091 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3092 37d19eb2 Michael Hanselmann
        continue
3093 37d19eb2 Michael Hanselmann
3094 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
3095 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
3096 dcb93971 Michael Hanselmann
3097 dcb93971 Michael Hanselmann
      for vol in node_vols:
3098 dcb93971 Michael Hanselmann
        node_output = []
3099 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
3100 dcb93971 Michael Hanselmann
          if field == "node":
3101 dcb93971 Michael Hanselmann
            val = node
3102 dcb93971 Michael Hanselmann
          elif field == "phys":
3103 dcb93971 Michael Hanselmann
            val = vol['dev']
3104 dcb93971 Michael Hanselmann
          elif field == "vg":
3105 dcb93971 Michael Hanselmann
            val = vol['vg']
3106 dcb93971 Michael Hanselmann
          elif field == "name":
3107 dcb93971 Michael Hanselmann
            val = vol['name']
3108 dcb93971 Michael Hanselmann
          elif field == "size":
3109 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
3110 dcb93971 Michael Hanselmann
          elif field == "instance":
3111 dcb93971 Michael Hanselmann
            for inst in ilist:
3112 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
3113 dcb93971 Michael Hanselmann
                continue
3114 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
3115 dcb93971 Michael Hanselmann
                val = inst.name
3116 dcb93971 Michael Hanselmann
                break
3117 dcb93971 Michael Hanselmann
            else:
3118 dcb93971 Michael Hanselmann
              val = '-'
3119 dcb93971 Michael Hanselmann
          else:
3120 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
3121 dcb93971 Michael Hanselmann
          node_output.append(str(val))
3122 dcb93971 Michael Hanselmann
3123 dcb93971 Michael Hanselmann
        output.append(node_output)
3124 dcb93971 Michael Hanselmann
3125 dcb93971 Michael Hanselmann
    return output
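
# Illustrative note (hypothetical values): every value is stringified
# before being appended, so with output_fields == ["node", "name", "size",
# "instance"] a row could look like:
#
#   ["node1.example.com", "disk0", "10240", "instance1"]
#
# with "-" in the instance column for logical volumes that do not belong
# to any instance.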
3126 dcb93971 Michael Hanselmann
3127 dcb93971 Michael Hanselmann
3128 9e5442ce Michael Hanselmann
class LUQueryNodeStorage(NoHooksLU):
3129 9e5442ce Michael Hanselmann
  """Logical unit for getting information on storage units on node(s).
3130 9e5442ce Michael Hanselmann

3131 9e5442ce Michael Hanselmann
  """
3132 9e5442ce Michael Hanselmann
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
3133 9e5442ce Michael Hanselmann
  REQ_BGL = False
3134 620a85fd Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3135 9e5442ce Michael Hanselmann
3136 0e3baaf3 Iustin Pop
  def CheckArguments(self):
3137 0e3baaf3 Iustin Pop
    _CheckStorageType(self.op.storage_type)
3138 9e5442ce Michael Hanselmann
3139 9e5442ce Michael Hanselmann
    _CheckOutputFields(static=self._FIELDS_STATIC,
3140 620a85fd Iustin Pop
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3141 9e5442ce Michael Hanselmann
                       selected=self.op.output_fields)
3142 9e5442ce Michael Hanselmann
3143 0e3baaf3 Iustin Pop
  def ExpandNames(self):
3144 9e5442ce Michael Hanselmann
    self.needed_locks = {}
3145 9e5442ce Michael Hanselmann
    self.share_locks[locking.LEVEL_NODE] = 1
3146 9e5442ce Michael Hanselmann
3147 9e5442ce Michael Hanselmann
    if self.op.nodes:
3148 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = \
3149 9e5442ce Michael Hanselmann
        _GetWantedNodes(self, self.op.nodes)
3150 9e5442ce Michael Hanselmann
    else:
3151 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3152 9e5442ce Michael Hanselmann
3153 9e5442ce Michael Hanselmann
  def CheckPrereq(self):
3154 9e5442ce Michael Hanselmann
    """Check prerequisites.
3155 9e5442ce Michael Hanselmann

3156 9e5442ce Michael Hanselmann
    This checks that the fields required are valid output fields.
3157 9e5442ce Michael Hanselmann

3158 9e5442ce Michael Hanselmann
    """
3159 9e5442ce Michael Hanselmann
    self.op.name = getattr(self.op, "name", None)
3160 9e5442ce Michael Hanselmann
3161 9e5442ce Michael Hanselmann
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3162 9e5442ce Michael Hanselmann
3163 9e5442ce Michael Hanselmann
  def Exec(self, feedback_fn):
3164 9e5442ce Michael Hanselmann
    """Computes the list of nodes and their attributes.
3165 9e5442ce Michael Hanselmann

3166 9e5442ce Michael Hanselmann
    """
3167 9e5442ce Michael Hanselmann
    # Always get name to sort by
3168 9e5442ce Michael Hanselmann
    if constants.SF_NAME in self.op.output_fields:
3169 9e5442ce Michael Hanselmann
      fields = self.op.output_fields[:]
3170 9e5442ce Michael Hanselmann
    else:
3171 9e5442ce Michael Hanselmann
      fields = [constants.SF_NAME] + self.op.output_fields
3172 9e5442ce Michael Hanselmann
3173 620a85fd Iustin Pop
    # Never ask for node or type as it's only known to the LU
3174 620a85fd Iustin Pop
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
3175 620a85fd Iustin Pop
      while extra in fields:
3176 620a85fd Iustin Pop
        fields.remove(extra)
3177 9e5442ce Michael Hanselmann
3178 9e5442ce Michael Hanselmann
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3179 9e5442ce Michael Hanselmann
    name_idx = field_idx[constants.SF_NAME]
3180 9e5442ce Michael Hanselmann
3181 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3182 9e5442ce Michael Hanselmann
    data = self.rpc.call_storage_list(self.nodes,
3183 9e5442ce Michael Hanselmann
                                      self.op.storage_type, st_args,
3184 9e5442ce Michael Hanselmann
                                      self.op.name, fields)
3185 9e5442ce Michael Hanselmann
3186 9e5442ce Michael Hanselmann
    result = []
3187 9e5442ce Michael Hanselmann
3188 9e5442ce Michael Hanselmann
    for node in utils.NiceSort(self.nodes):
3189 9e5442ce Michael Hanselmann
      nresult = data[node]
3190 9e5442ce Michael Hanselmann
      if nresult.offline:
3191 9e5442ce Michael Hanselmann
        continue
3192 9e5442ce Michael Hanselmann
3193 9e5442ce Michael Hanselmann
      msg = nresult.fail_msg
3194 9e5442ce Michael Hanselmann
      if msg:
3195 9e5442ce Michael Hanselmann
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3196 9e5442ce Michael Hanselmann
        continue
3197 9e5442ce Michael Hanselmann
3198 9e5442ce Michael Hanselmann
      rows = dict([(row[name_idx], row) for row in nresult.payload])
3199 9e5442ce Michael Hanselmann
3200 9e5442ce Michael Hanselmann
      for name in utils.NiceSort(rows.keys()):
3201 9e5442ce Michael Hanselmann
        row = rows[name]
3202 9e5442ce Michael Hanselmann
3203 9e5442ce Michael Hanselmann
        out = []
3204 9e5442ce Michael Hanselmann
3205 9e5442ce Michael Hanselmann
        for field in self.op.output_fields:
3206 620a85fd Iustin Pop
          if field == constants.SF_NODE:
3207 9e5442ce Michael Hanselmann
            val = node
3208 620a85fd Iustin Pop
          elif field == constants.SF_TYPE:
3209 620a85fd Iustin Pop
            val = self.op.storage_type
3210 9e5442ce Michael Hanselmann
          elif field in field_idx:
3211 9e5442ce Michael Hanselmann
            val = row[field_idx[field]]
3212 9e5442ce Michael Hanselmann
          else:
3213 9e5442ce Michael Hanselmann
            raise errors.ParameterError(field)
3214 9e5442ce Michael Hanselmann
3215 9e5442ce Michael Hanselmann
          out.append(val)
3216 9e5442ce Michael Hanselmann
3217 9e5442ce Michael Hanselmann
        result.append(out)
3218 9e5442ce Michael Hanselmann
3219 9e5442ce Michael Hanselmann
    return result
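
# Illustrative sketch (assumption): storage_type selects the backend whose
# units are listed; the per-unit fields are validated against
# constants.VALID_STORAGE_FIELDS, while the "node" column is filled in by
# the LU itself.  A possible opcode (class and constant names assumed):
#
#   op = opcodes.OpQueryNodeStorage(nodes=[],
#                                   storage_type=constants.ST_LVM_VG,
#                                   output_fields=["node", "name", "size"])
#
# SF_NAME is always fetched internally so the rows can be sorted by name,
# even when the caller did not request it.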
3220 9e5442ce Michael Hanselmann
3221 9e5442ce Michael Hanselmann
3222 efb8da02 Michael Hanselmann
class LUModifyNodeStorage(NoHooksLU):
3223 efb8da02 Michael Hanselmann
  """Logical unit for modifying a storage volume on a node.
3224 efb8da02 Michael Hanselmann

3225 efb8da02 Michael Hanselmann
  """
3226 efb8da02 Michael Hanselmann
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
3227 efb8da02 Michael Hanselmann
  REQ_BGL = False
3228 efb8da02 Michael Hanselmann
3229 efb8da02 Michael Hanselmann
  def CheckArguments(self):
3230 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3231 efb8da02 Michael Hanselmann
3232 0e3baaf3 Iustin Pop
    _CheckStorageType(self.op.storage_type)
3233 efb8da02 Michael Hanselmann
3234 efb8da02 Michael Hanselmann
  def ExpandNames(self):
3235 efb8da02 Michael Hanselmann
    self.needed_locks = {
3236 efb8da02 Michael Hanselmann
      locking.LEVEL_NODE: self.op.node_name,
3237 efb8da02 Michael Hanselmann
      }
3238 efb8da02 Michael Hanselmann
3239 efb8da02 Michael Hanselmann
  def CheckPrereq(self):
3240 efb8da02 Michael Hanselmann
    """Check prerequisites.
3241 efb8da02 Michael Hanselmann

3242 efb8da02 Michael Hanselmann
    """
3243 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
3244 efb8da02 Michael Hanselmann
3245 efb8da02 Michael Hanselmann
    try:
3246 efb8da02 Michael Hanselmann
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
3247 efb8da02 Michael Hanselmann
    except KeyError:
3248 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
3249 5c983ee5 Iustin Pop
                                 " modified" % storage_type,
3250 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3251 efb8da02 Michael Hanselmann
3252 efb8da02 Michael Hanselmann
    diff = set(self.op.changes.keys()) - modifiable
3253 efb8da02 Michael Hanselmann
    if diff:
3254 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("The following fields can not be modified for"
3255 efb8da02 Michael Hanselmann
                                 " storage units of type '%s': %r" %
3256 5c983ee5 Iustin Pop
                                 (storage_type, list(diff)),
3257 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3258 efb8da02 Michael Hanselmann
3259 efb8da02 Michael Hanselmann
  def Exec(self, feedback_fn):
3260 efb8da02 Michael Hanselmann
    """Computes the list of nodes and their attributes.
3261 efb8da02 Michael Hanselmann

3262 efb8da02 Michael Hanselmann
    """
3263 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3264 efb8da02 Michael Hanselmann
    result = self.rpc.call_storage_modify(self.op.node_name,
3265 efb8da02 Michael Hanselmann
                                          self.op.storage_type, st_args,
3266 efb8da02 Michael Hanselmann
                                          self.op.name, self.op.changes)
3267 efb8da02 Michael Hanselmann
    result.Raise("Failed to modify storage unit '%s' on %s" %
3268 efb8da02 Michael Hanselmann
                 (self.op.name, self.op.node_name))
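
# Illustrative sketch (assumption): "changes" maps field names to new
# values and is validated against constants.MODIFIABLE_STORAGE_FIELDS for
# the given storage type.  Marking an LVM physical volume as not
# allocatable might look like this (class and constant names assumed):
#
#   op = opcodes.OpModifyNodeStorage(
#     node_name="node1.example.com", storage_type=constants.ST_LVM_PV,
#     name="/dev/sdb1", changes={constants.SF_ALLOCATABLE: False})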
3269 efb8da02 Michael Hanselmann
3270 efb8da02 Michael Hanselmann
3271 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
3272 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
3273 a8083063 Iustin Pop

3274 a8083063 Iustin Pop
  """
3275 a8083063 Iustin Pop
  HPATH = "node-add"
3276 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
3277 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
3278 a8083063 Iustin Pop
3279 44caf5a8 Iustin Pop
  def CheckArguments(self):
3280 44caf5a8 Iustin Pop
    # validate/normalize the node name
3281 44caf5a8 Iustin Pop
    self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)
3282 44caf5a8 Iustin Pop
3283 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3284 a8083063 Iustin Pop
    """Build hooks env.
3285 a8083063 Iustin Pop

3286 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
3287 a8083063 Iustin Pop

3288 a8083063 Iustin Pop
    """
3289 a8083063 Iustin Pop
    env = {
3290 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
3291 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
3292 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
3293 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
3294 a8083063 Iustin Pop
      }
3295 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
3296 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
3297 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
3298 a8083063 Iustin Pop
3299 a8083063 Iustin Pop
  def CheckPrereq(self):
3300 a8083063 Iustin Pop
    """Check prerequisites.
3301 a8083063 Iustin Pop

3302 a8083063 Iustin Pop
    This checks:
3303 a8083063 Iustin Pop
     - the new node is not already in the config
3304 a8083063 Iustin Pop
     - it is resolvable
3305 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
3306 a8083063 Iustin Pop

3307 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
3308 a8083063 Iustin Pop

3309 a8083063 Iustin Pop
    """
3310 a8083063 Iustin Pop
    node_name = self.op.node_name
3311 a8083063 Iustin Pop
    cfg = self.cfg
3312 a8083063 Iustin Pop
3313 104f4ca1 Iustin Pop
    dns_data = utils.GetHostInfo(node_name)
3314 a8083063 Iustin Pop
3315 bcf043c9 Iustin Pop
    node = dns_data.name
3316 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
3317 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
3318 a8083063 Iustin Pop
    if secondary_ip is None:
3319 a8083063 Iustin Pop
      secondary_ip = primary_ip
3320 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
3321 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given",
3322 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3323 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
3324 e7c6e02b Michael Hanselmann
3325 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
3326 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
3327 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
3328 5c983ee5 Iustin Pop
                                 node, errors.ECODE_EXISTS)
3329 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
3330 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
3331 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
3332 a8083063 Iustin Pop
3333 1513e2dd Iustin Pop
    self.changed_primary_ip = False
3334 1513e2dd Iustin Pop
3335 a8083063 Iustin Pop
    for existing_node_name in node_list:
3336 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
3337 e7c6e02b Michael Hanselmann
3338 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
3339 1513e2dd Iustin Pop
        if existing_node.secondary_ip != secondary_ip:
3340 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
3341 5c983ee5 Iustin Pop
                                     " address configuration as before",
3342 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
3343 1513e2dd Iustin Pop
        if existing_node.primary_ip != primary_ip:
3344 1513e2dd Iustin Pop
          self.changed_primary_ip = True
3345 1513e2dd Iustin Pop
3346 e7c6e02b Michael Hanselmann
        continue
3347 e7c6e02b Michael Hanselmann
3348 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
3349 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
3350 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
3351 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
3352 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
3353 5c983ee5 Iustin Pop
                                   " existing node %s" % existing_node.name,
3354 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
3355 a8083063 Iustin Pop
3356 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
3357 a8083063 Iustin Pop
    # same as for the master
3358 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
3359 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
3360 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
3361 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
3362 a8083063 Iustin Pop
      if master_singlehomed:
3363 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
3364 5c983ee5 Iustin Pop
                                   " new node has one",
3365 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3366 a8083063 Iustin Pop
      else:
3367 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
3368 5c983ee5 Iustin Pop
                                   " new node doesn't have one",
3369 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3370 a8083063 Iustin Pop
3371 5bbd3f7f Michael Hanselmann
    # checks reachability
3372 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
3373 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping",
3374 5c983ee5 Iustin Pop
                                 errors.ECODE_ENVIRON)
3375 a8083063 Iustin Pop
3376 a8083063 Iustin Pop
    if not newbie_singlehomed:
3377 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
3378 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
3379 b15d625f Iustin Pop
                           source=myself.secondary_ip):
3380 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
3381 5c983ee5 Iustin Pop
                                   " based ping to noded port",
3382 5c983ee5 Iustin Pop
                                   errors.ECODE_ENVIRON)
3383 a8083063 Iustin Pop
3384 a8ae3eb5 Iustin Pop
    if self.op.readd:
3385 a8ae3eb5 Iustin Pop
      exceptions = [node]
3386 a8ae3eb5 Iustin Pop
    else:
3387 a8ae3eb5 Iustin Pop
      exceptions = []
3388 6d7e1f20 Guido Trotter
3389 6d7e1f20 Guido Trotter
    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
3390 0fff97e9 Guido Trotter
3391 a8ae3eb5 Iustin Pop
    if self.op.readd:
3392 a8ae3eb5 Iustin Pop
      self.new_node = self.cfg.GetNodeInfo(node)
3393 a8ae3eb5 Iustin Pop
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
3394 a8ae3eb5 Iustin Pop
    else:
3395 a8ae3eb5 Iustin Pop
      self.new_node = objects.Node(name=node,
3396 a8ae3eb5 Iustin Pop
                                   primary_ip=primary_ip,
3397 a8ae3eb5 Iustin Pop
                                   secondary_ip=secondary_ip,
3398 a8ae3eb5 Iustin Pop
                                   master_candidate=self.master_candidate,
3399 a8ae3eb5 Iustin Pop
                                   offline=False, drained=False)
3400 a8083063 Iustin Pop
3401 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3402 a8083063 Iustin Pop
    """Adds the new node to the cluster.
3403 a8083063 Iustin Pop

3404 a8083063 Iustin Pop
    """
3405 a8083063 Iustin Pop
    new_node = self.new_node
3406 a8083063 Iustin Pop
    node = new_node.name
3407 a8083063 Iustin Pop
3408 a8ae3eb5 Iustin Pop
    # for re-adds, reset the offline/drained/master-candidate flags;
3409 a8ae3eb5 Iustin Pop
    # we need to reset here, otherwise offline would prevent RPC calls
3410 a8ae3eb5 Iustin Pop
    # later in the procedure; this also means that if the re-add
3411 a8ae3eb5 Iustin Pop
    # fails, we are left with a non-offlined, broken node
3412 a8ae3eb5 Iustin Pop
    if self.op.readd:
3413 7260cfbe Iustin Pop
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
3414 a8ae3eb5 Iustin Pop
      self.LogInfo("Readding a node, the offline/drained flags were reset")
3415 a8ae3eb5 Iustin Pop
      # if we demote the node, we do cleanup later in the procedure
3416 a8ae3eb5 Iustin Pop
      new_node.master_candidate = self.master_candidate
3417 1513e2dd Iustin Pop
      if self.changed_primary_ip:
3418 1513e2dd Iustin Pop
        new_node.primary_ip = self.op.primary_ip
3419 a8ae3eb5 Iustin Pop
3420 a8ae3eb5 Iustin Pop
    # notify the user about any possible mc promotion
3421 a8ae3eb5 Iustin Pop
    if new_node.master_candidate:
3422 a8ae3eb5 Iustin Pop
      self.LogInfo("Node will be a master candidate")
3423 a8ae3eb5 Iustin Pop
3424 a8083063 Iustin Pop
    # check connectivity
3425 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
3426 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
3427 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
3428 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
3429 90b54c26 Iustin Pop
                   node, result.payload)
3430 a8083063 Iustin Pop
    else:
3431 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
3432 90b54c26 Iustin Pop
                               " node version %s" %
3433 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
3434 a8083063 Iustin Pop
3435 a8083063 Iustin Pop
    # setup ssh on node
3436 b989b9d9 Ken Wehr
    if self.cfg.GetClusterInfo().modify_ssh_setup:
3437 b989b9d9 Ken Wehr
      logging.info("Copy ssh key to node %s", node)
3438 b989b9d9 Ken Wehr
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
3439 b989b9d9 Ken Wehr
      keyarray = []
3440 b989b9d9 Ken Wehr
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
3441 b989b9d9 Ken Wehr
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
3442 b989b9d9 Ken Wehr
                  priv_key, pub_key]
3443 b989b9d9 Ken Wehr
3444 b989b9d9 Ken Wehr
      for i in keyfiles:
3445 b989b9d9 Ken Wehr
        keyarray.append(utils.ReadFile(i))
3446 b989b9d9 Ken Wehr
3447 b989b9d9 Ken Wehr
      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
3448 b989b9d9 Ken Wehr
                                      keyarray[2], keyarray[3], keyarray[4],
3449 b989b9d9 Ken Wehr
                                      keyarray[5])
3450 b989b9d9 Ken Wehr
      result.Raise("Cannot transfer ssh keys to the new node")
3451 a8083063 Iustin Pop
3452 a8083063 Iustin Pop
    # Add node to our /etc/hosts
3453 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3454 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
3455 c8a0948f Michael Hanselmann
3456 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
3457 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
3458 781de953 Iustin Pop
                                                 new_node.secondary_ip)
3459 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
3460 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_ENVIRON)
3461 c2fc8250 Iustin Pop
      if not result.payload:
3462 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
3463 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
3464 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
3465 a8083063 Iustin Pop
3466 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
3467 5c0527ed Guido Trotter
    node_verify_param = {
3468 f60759f7 Iustin Pop
      constants.NV_NODELIST: [node],
3469 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
3470 5c0527ed Guido Trotter
    }
3471 5c0527ed Guido Trotter
3472 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
3473 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
3474 5c0527ed Guido Trotter
    for verifier in node_verify_list:
3475 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
3476 f60759f7 Iustin Pop
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
3477 6f68a739 Iustin Pop
      if nl_payload:
3478 6f68a739 Iustin Pop
        for failed in nl_payload:
3479 31821208 Iustin Pop
          feedback_fn("ssh/hostname verification failed"
3480 31821208 Iustin Pop
                      " (checking from %s): %s" %
3481 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
3482 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
3483 ff98055b Iustin Pop
3484 d8470559 Michael Hanselmann
    if self.op.readd:
3485 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
3486 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
3487 a8ae3eb5 Iustin Pop
      # make sure we redistribute the config
3488 a4eae71f Michael Hanselmann
      self.cfg.Update(new_node, feedback_fn)
3489 a8ae3eb5 Iustin Pop
      # and make sure the new node will not have old files around
3490 a8ae3eb5 Iustin Pop
      if not new_node.master_candidate:
3491 a8ae3eb5 Iustin Pop
        result = self.rpc.call_node_demote_from_mc(new_node.name)
3492 3cebe102 Michael Hanselmann
        msg = result.fail_msg
3493 a8ae3eb5 Iustin Pop
        if msg:
3494 a8ae3eb5 Iustin Pop
          self.LogWarning("Node failed to demote itself from master"
3495 a8ae3eb5 Iustin Pop
                          " candidate status: %s" % msg)
3496 d8470559 Michael Hanselmann
    else:
3497 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
3498 0debfb35 Guido Trotter
      self.context.AddNode(new_node, self.proc.GetECId())
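
# Illustrative sketch (assumption): adding or re-adding a node goes through
# an opcode along these lines (class name and addresses assumed):
#
#   op = opcodes.OpAddNode(node_name="node4.example.com",
#                          secondary_ip="192.0.2.14", readd=False)
#
# With readd=True the node must already exist in the configuration and must
# keep its secondary IP; in both cases the node may be promoted to master
# candidate, as decided by _DecideSelfPromotion in CheckPrereq.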
3499 a8083063 Iustin Pop
3500 a8083063 Iustin Pop
3501 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
3502 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
3503 b31c8676 Iustin Pop

3504 b31c8676 Iustin Pop
  """
3505 b31c8676 Iustin Pop
  HPATH = "node-modify"
3506 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
3507 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
3508 b31c8676 Iustin Pop
  REQ_BGL = False
3509 b31c8676 Iustin Pop
3510 b31c8676 Iustin Pop
  def CheckArguments(self):
3511 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3512 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
3513 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
3514 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
3515 601908d0 Iustin Pop
    _CheckBooleanOpField(self.op, 'auto_promote')
3516 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
3517 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
3518 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification",
3519 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3520 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
3521 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
3522 5c983ee5 Iustin Pop
                                 " state at the same time",
3523 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3524 b31c8676 Iustin Pop
3525 601908d0 Iustin Pop
    # Boolean value that tells us whether we're offlining or draining the node
3526 601908d0 Iustin Pop
    self.offline_or_drain = (self.op.offline == True or
3527 601908d0 Iustin Pop
                             self.op.drained == True)
3528 601908d0 Iustin Pop
    self.deoffline_or_drain = (self.op.offline == False or
3529 601908d0 Iustin Pop
                               self.op.drained == False)
3530 601908d0 Iustin Pop
    self.might_demote = (self.op.master_candidate == False or
3531 601908d0 Iustin Pop
                         self.offline_or_drain)
3532 601908d0 Iustin Pop
3533 601908d0 Iustin Pop
    self.lock_all = self.op.auto_promote and self.might_demote
3534 601908d0 Iustin Pop
3535 601908d0 Iustin Pop
3536 b31c8676 Iustin Pop
  def ExpandNames(self):
3537 601908d0 Iustin Pop
    if self.lock_all:
3538 601908d0 Iustin Pop
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
3539 601908d0 Iustin Pop
    else:
3540 601908d0 Iustin Pop
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
3541 b31c8676 Iustin Pop
3542 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
3543 b31c8676 Iustin Pop
    """Build hooks env.
3544 b31c8676 Iustin Pop

3545 b31c8676 Iustin Pop
    This runs on the master node.
3546 b31c8676 Iustin Pop

3547 b31c8676 Iustin Pop
    """
3548 b31c8676 Iustin Pop
    env = {
3549 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
3550 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
3551 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
3552 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
3553 b31c8676 Iustin Pop
      }
3554 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
3555 b31c8676 Iustin Pop
          self.op.node_name]
3556 b31c8676 Iustin Pop
    return env, nl, nl
3557 b31c8676 Iustin Pop
3558 b31c8676 Iustin Pop
  def CheckPrereq(self):
3559 b31c8676 Iustin Pop
    """Check prerequisites.
3560 b31c8676 Iustin Pop

3561 b31c8676 Iustin Pop
    This only checks the instance list against the existing names.
3562 b31c8676 Iustin Pop

3563 b31c8676 Iustin Pop
    """
3564 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
3565 b31c8676 Iustin Pop
3566 97c61d46 Iustin Pop
    if (self.op.master_candidate is not None or
3567 97c61d46 Iustin Pop
        self.op.drained is not None or
3568 97c61d46 Iustin Pop
        self.op.offline is not None):
3569 97c61d46 Iustin Pop
      # we can't change the master's node flags
3570 97c61d46 Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
3571 97c61d46 Iustin Pop
        raise errors.OpPrereqError("The master role can be changed"
3572 5c983ee5 Iustin Pop
                                   " only via masterfailover",
3573 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3574 97c61d46 Iustin Pop
3575 601908d0 Iustin Pop
3576 601908d0 Iustin Pop
    if node.master_candidate and self.might_demote and not self.lock_all:
3577 601908d0 Iustin Pop
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
3578 601908d0 Iustin Pop
      # check if after removing the current node, we're missing master
3579 601908d0 Iustin Pop
      # candidates
3580 601908d0 Iustin Pop
      (mc_remaining, mc_should, _) = \
3581 601908d0 Iustin Pop
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
3582 8fe9239e Iustin Pop
      if mc_remaining < mc_should:
3583 601908d0 Iustin Pop
        raise errors.OpPrereqError("Not enough master candidates, please"
3584 601908d0 Iustin Pop
                                   " pass auto_promote to allow promotion",
3585 601908d0 Iustin Pop
                                   errors.ECODE_INVAL)
3586 3e83dd48 Iustin Pop
3587 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
3588 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
3589 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
3590 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
3591 5c983ee5 Iustin Pop
                                 " to master_candidate" % node.name,
3592 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3593 3a5ba66a Iustin Pop
3594 3d9eb52b Guido Trotter
    # If we're being de-offlined or de-drained, we may need to promote
    # ourselves to master candidate
3595 601908d0 Iustin Pop
    if (self.deoffline_or_drain and not self.offline_or_drain and not
3596 cea0534a Guido Trotter
        self.op.master_candidate == True and not node.master_candidate):
3597 3d9eb52b Guido Trotter
      self.op.master_candidate = _DecideSelfPromotion(self)
3598 3d9eb52b Guido Trotter
      if self.op.master_candidate:
3599 3d9eb52b Guido Trotter
        self.LogInfo("Autopromoting node to master candidate")
3600 3d9eb52b Guido Trotter
3601 b31c8676 Iustin Pop
    return
3602 b31c8676 Iustin Pop
3603 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
3604 b31c8676 Iustin Pop
    """Modifies a node.
3605 b31c8676 Iustin Pop

3606 b31c8676 Iustin Pop
    """
3607 3a5ba66a Iustin Pop
    node = self.node
3608 b31c8676 Iustin Pop
3609 b31c8676 Iustin Pop
    result = []
3610 c9d443ea Iustin Pop
    changed_mc = False
3611 b31c8676 Iustin Pop
3612 3a5ba66a Iustin Pop
    if self.op.offline is not None:
3613 3a5ba66a Iustin Pop
      node.offline = self.op.offline
3614 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
3615 c9d443ea Iustin Pop
      if self.op.offline == True:
3616 c9d443ea Iustin Pop
        if node.master_candidate:
3617 c9d443ea Iustin Pop
          node.master_candidate = False
3618 c9d443ea Iustin Pop
          changed_mc = True
3619 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
3620 c9d443ea Iustin Pop
        if node.drained:
3621 c9d443ea Iustin Pop
          node.drained = False
3622 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
3623 3a5ba66a Iustin Pop
3624 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
3625 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
3626 c9d443ea Iustin Pop
      changed_mc = True
3627 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
3628 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
3629 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
3630 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
3631 0959c824 Iustin Pop
        if msg:
3632 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
3633 b31c8676 Iustin Pop
3634 c9d443ea Iustin Pop
    if self.op.drained is not None:
3635 c9d443ea Iustin Pop
      node.drained = self.op.drained
3636 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
3637 c9d443ea Iustin Pop
      if self.op.drained == True:
3638 c9d443ea Iustin Pop
        if node.master_candidate:
3639 c9d443ea Iustin Pop
          node.master_candidate = False
3640 c9d443ea Iustin Pop
          changed_mc = True
3641 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
3642 dec0d9da Iustin Pop
          rrc = self.rpc.call_node_demote_from_mc(node.name)
3643 3cebe102 Michael Hanselmann
          msg = rrc.fail_msg
3644 dec0d9da Iustin Pop
          if msg:
3645 dec0d9da Iustin Pop
            self.LogWarning("Node failed to demote itself: %s" % msg)
3646 c9d443ea Iustin Pop
        if node.offline:
3647 c9d443ea Iustin Pop
          node.offline = False
3648 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
3649 c9d443ea Iustin Pop
3650 601908d0 Iustin Pop
    # we locked all nodes, we adjust the CP before updating this node
3651 601908d0 Iustin Pop
    if self.lock_all:
3652 601908d0 Iustin Pop
      _AdjustCandidatePool(self, [node.name])
3653 601908d0 Iustin Pop
3654 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
3655 a4eae71f Michael Hanselmann
    self.cfg.Update(node, feedback_fn)
3656 601908d0 Iustin Pop
3657 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
3658 c9d443ea Iustin Pop
    if changed_mc:
3659 3a26773f Iustin Pop
      self.context.ReaddNode(node)
3660 b31c8676 Iustin Pop
3661 b31c8676 Iustin Pop
    return result
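
# Illustrative note: the list returned above contains (parameter, new
# value) pairs describing what was actually changed, for example
# (values assumed):
#
#   [("offline", "True"), ("master_candidate", "auto-demotion due to offline")]
#
# Offlining or draining a node implicitly demotes it from master candidate,
# and with auto_promote set the candidate pool is re-adjusted over all
# locked nodes before the node object is written back.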
3662 b31c8676 Iustin Pop
3663 b31c8676 Iustin Pop
3664 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
3665 f5118ade Iustin Pop
  """Powercycles a node.
3666 f5118ade Iustin Pop

3667 f5118ade Iustin Pop
  """
3668 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
3669 f5118ade Iustin Pop
  REQ_BGL = False
3670 f5118ade Iustin Pop
3671 f5118ade Iustin Pop
  def CheckArguments(self):
3672 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3673 cf26a87a Iustin Pop
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
3674 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
3675 5c983ee5 Iustin Pop
                                 " parameter was not set",
3676 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3677 f5118ade Iustin Pop
3678 f5118ade Iustin Pop
  def ExpandNames(self):
3679 f5118ade Iustin Pop
    """Locking for PowercycleNode.
3680 f5118ade Iustin Pop

3681 efb8da02 Michael Hanselmann
    This is a last-resort option and shouldn't block on other
3682 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
3683 f5118ade Iustin Pop

3684 f5118ade Iustin Pop
    """
3685 f5118ade Iustin Pop
    self.needed_locks = {}
3686 f5118ade Iustin Pop
3687 f5118ade Iustin Pop
  def CheckPrereq(self):
3688 f5118ade Iustin Pop
    """Check prerequisites.
3689 f5118ade Iustin Pop

3690 f5118ade Iustin Pop
    This LU has no prereqs.
3691 f5118ade Iustin Pop

3692 f5118ade Iustin Pop
    """
3693 f5118ade Iustin Pop
    pass
3694 f5118ade Iustin Pop
3695 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
3696 f5118ade Iustin Pop
    """Reboots a node.
3697 f5118ade Iustin Pop

3698 f5118ade Iustin Pop
    """
3699 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
3700 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
3701 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
3702 f5118ade Iustin Pop
    return result.payload
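
# Illustrative sketch (assumption): powercycling the master node requires
# the force flag, as enforced in CheckArguments; an opcode (class name
# assumed) would look like:
#
#   op = opcodes.OpPowercycleNode(node_name="node2.example.com", force=False)
#
# No locks are taken on purpose, so the reboot can still be scheduled when
# the node is stuck and other jobs hold locks on it.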
3703 f5118ade Iustin Pop
3704 f5118ade Iustin Pop
3705 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
3706 a8083063 Iustin Pop
  """Query cluster configuration.
3707 a8083063 Iustin Pop

3708 a8083063 Iustin Pop
  """
3709 a8083063 Iustin Pop
  _OP_REQP = []
3710 642339cf Guido Trotter
  REQ_BGL = False
3711 642339cf Guido Trotter
3712 642339cf Guido Trotter
  def ExpandNames(self):
3713 642339cf Guido Trotter
    self.needed_locks = {}
3714 a8083063 Iustin Pop
3715 a8083063 Iustin Pop
  def CheckPrereq(self):
3716 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
3717 a8083063 Iustin Pop

3718 a8083063 Iustin Pop
    """
3719 a8083063 Iustin Pop
    pass
3720 a8083063 Iustin Pop
3721 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3722 a8083063 Iustin Pop
    """Return cluster config.
3723 a8083063 Iustin Pop

3724 a8083063 Iustin Pop
    """
3725 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
3726 17463d22 Renรฉ Nussbaumer
    os_hvp = {}
3727 17463d22 Renรฉ Nussbaumer
3728 17463d22 Renรฉ Nussbaumer
    # Filter just for enabled hypervisors
3729 17463d22 Renรฉ Nussbaumer
    for os_name, hv_dict in cluster.os_hvp.items():
3730 17463d22 Renรฉ Nussbaumer
      os_hvp[os_name] = {}
3731 17463d22 Renรฉ Nussbaumer
      for hv_name, hv_params in hv_dict.items():
3732 17463d22 Renรฉ Nussbaumer
        if hv_name in cluster.enabled_hypervisors:
3733 17463d22 Renรฉ Nussbaumer
          os_hvp[os_name][hv_name] = hv_params
3734 17463d22 Renรฉ Nussbaumer
3735 a8083063 Iustin Pop
    result = {
3736 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
3737 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
3738 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
3739 d1a7d66f Guido Trotter
      "os_api_version": max(constants.OS_API_VERSIONS),
3740 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
3741 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
3742 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
3743 469f88e1 Iustin Pop
      "master": cluster.master_node,
3744 066f465d Guido Trotter
      "default_hypervisor": cluster.enabled_hypervisors[0],
3745 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
3746 b8810fec Michael Hanselmann
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
3747 7c4d6c7b Michael Hanselmann
                        for hypervisor_name in cluster.enabled_hypervisors]),
3748 17463d22 Renรฉ Nussbaumer
      "os_hvp": os_hvp,
3749 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
3750 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
3751 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
3752 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
3753 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
3754 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
3755 3953242f Iustin Pop
      "maintain_node_health": cluster.maintain_node_health,
3756 90f72445 Iustin Pop
      "ctime": cluster.ctime,
3757 90f72445 Iustin Pop
      "mtime": cluster.mtime,
3758 259578eb Iustin Pop
      "uuid": cluster.uuid,
3759 c118d1f4 Michael Hanselmann
      "tags": list(cluster.GetTags()),
3760 1338f2b4 Balazs Lecz
      "uid_pool": cluster.uid_pool,
3761 a8083063 Iustin Pop
      }
3762 a8083063 Iustin Pop
3763 a8083063 Iustin Pop
    return result
3764 a8083063 Iustin Pop
3765 a8083063 Iustin Pop
3766 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
3767 ae5849b5 Michael Hanselmann
  """Return configuration values.
3768 a8083063 Iustin Pop

3769 a8083063 Iustin Pop
  """
3770 a8083063 Iustin Pop
  _OP_REQP = []
3771 642339cf Guido Trotter
  REQ_BGL = False
3772 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
3773 05e50653 Michael Hanselmann
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
3774 05e50653 Michael Hanselmann
                                  "watcher_pause")
3775 642339cf Guido Trotter
3776 642339cf Guido Trotter
  def ExpandNames(self):
3777 642339cf Guido Trotter
    self.needed_locks = {}
3778 a8083063 Iustin Pop
3779 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3780 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3781 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
3782 ae5849b5 Michael Hanselmann
3783 a8083063 Iustin Pop
  def CheckPrereq(self):
3784 a8083063 Iustin Pop
    """No prerequisites.
3785 a8083063 Iustin Pop

3786 a8083063 Iustin Pop
    """
3787 a8083063 Iustin Pop
    pass
3788 a8083063 Iustin Pop
3789 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3790 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
3791 a8083063 Iustin Pop

3792 a8083063 Iustin Pop
    """
3793 ae5849b5 Michael Hanselmann
    values = []
3794 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
3795 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
3796 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
3797 ae5849b5 Michael Hanselmann
      elif field == "master_node":
3798 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
3799 3ccafd0e Iustin Pop
      elif field == "drain_flag":
3800 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
3801 05e50653 Michael Hanselmann
      elif field == "watcher_pause":
3802 cac599f1 Michael Hanselmann
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
3803 ae5849b5 Michael Hanselmann
      else:
3804 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
3805 3ccafd0e Iustin Pop
      values.append(entry)
3806 ae5849b5 Michael Hanselmann
    return values
3807 a8083063 Iustin Pop
3808 a8083063 Iustin Pop
3809 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
3810 a8083063 Iustin Pop
  """Bring up an instance's disks.
3811 a8083063 Iustin Pop

3812 a8083063 Iustin Pop
  """
3813 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3814 f22a8ba3 Guido Trotter
  REQ_BGL = False
3815 f22a8ba3 Guido Trotter
3816 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3817 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3818 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3819 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3820 f22a8ba3 Guido Trotter
3821 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3822 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3823 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3824 a8083063 Iustin Pop
3825 a8083063 Iustin Pop
  def CheckPrereq(self):
3826 a8083063 Iustin Pop
    """Check prerequisites.
3827 a8083063 Iustin Pop

3828 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3829 a8083063 Iustin Pop

3830 a8083063 Iustin Pop
    """
3831 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3832 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3833 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3834 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3835 b4ec07f8 Iustin Pop
    if not hasattr(self.op, "ignore_size"):
3836 b4ec07f8 Iustin Pop
      self.op.ignore_size = False
3837 a8083063 Iustin Pop
3838 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3839 a8083063 Iustin Pop
    """Activate the disks.
3840 a8083063 Iustin Pop

3841 a8083063 Iustin Pop
    """
3842 b4ec07f8 Iustin Pop
    disks_ok, disks_info = \
3843 b4ec07f8 Iustin Pop
              _AssembleInstanceDisks(self, self.instance,
3844 b4ec07f8 Iustin Pop
                                     ignore_size=self.op.ignore_size)
3845 a8083063 Iustin Pop
    if not disks_ok:
3846 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
3847 a8083063 Iustin Pop
3848 a8083063 Iustin Pop
    return disks_info
3849 a8083063 Iustin Pop
3850 a8083063 Iustin Pop
3851 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
3852 e3443b36 Iustin Pop
                           ignore_size=False):
3853 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
3854 a8083063 Iustin Pop

3855 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
3856 a8083063 Iustin Pop

3857 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3858 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3859 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3860 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
3861 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
3862 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
3863 e4376078 Iustin Pop
      won't result in an error return from the function
3864 e3443b36 Iustin Pop
  @type ignore_size: boolean
3865 e3443b36 Iustin Pop
  @param ignore_size: if true, the current known size of the disk
3866 e3443b36 Iustin Pop
      will not be used during the disk activation, useful for cases
3867 e3443b36 Iustin Pop
      when the size is wrong
3868 e4376078 Iustin Pop
  @return: a tuple of (disks_ok, device_info), where device_info is a list of
3869 e4376078 Iustin Pop
      (host, instance_visible_name, node_visible_name)
3870 e4376078 Iustin Pop
      with the mapping from node devices to instance devices
3871 a8083063 Iustin Pop

3872 a8083063 Iustin Pop
  """
3873 a8083063 Iustin Pop
  device_info = []
3874 a8083063 Iustin Pop
  disks_ok = True
3875 fdbd668d Iustin Pop
  iname = instance.name
3876 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
3877 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
3878 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
3879 fdbd668d Iustin Pop
3880 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
3881 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
3882 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
3883 fdbd668d Iustin Pop
  # SyncSource, etc.)
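  # One possible shape for that fix, as a sketch only (the helpers named
  # here are hypothetical and do not exist in this module):
  #   def _WaitForDrbdConnect(lu, node, disk, timeout=60):
  #     end = time.time() + timeout
  #     while time.time() < end:
  #       if _GetDrbdNetworkState(lu, node, disk) != "WFConnection":
  #         return True
  #       time.sleep(1)
  #     return False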
3884 fdbd668d Iustin Pop
3885 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
3886 a8083063 Iustin Pop
  for inst_disk in instance.disks:
3887 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3888 e3443b36 Iustin Pop
      if ignore_size:
3889 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3890 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3891 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3892 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
3893 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3894 53c14ef1 Iustin Pop
      if msg:
3895 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3896 53c14ef1 Iustin Pop
                           " (is_primary=False, pass=1): %s",
3897 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3898 fdbd668d Iustin Pop
        if not ignore_secondaries:
3899 a8083063 Iustin Pop
          disks_ok = False
3900 fdbd668d Iustin Pop
3901 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
3902 fdbd668d Iustin Pop
3903 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
3904 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
3905 d52ea991 Michael Hanselmann
    dev_path = None
3906 d52ea991 Michael Hanselmann
3907 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3908 fdbd668d Iustin Pop
      if node != instance.primary_node:
3909 fdbd668d Iustin Pop
        continue
3910 e3443b36 Iustin Pop
      if ignore_size:
3911 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3912 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3913 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3914 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
3915 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3916 53c14ef1 Iustin Pop
      if msg:
3917 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3918 53c14ef1 Iustin Pop
                           " (is_primary=True, pass=2): %s",
3919 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3920 fdbd668d Iustin Pop
        disks_ok = False
3921 d52ea991 Michael Hanselmann
      else:
3922 d52ea991 Michael Hanselmann
        dev_path = result.payload
3923 d52ea991 Michael Hanselmann
3924 d52ea991 Michael Hanselmann
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
3925 a8083063 Iustin Pop
3926 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
3927 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
3928 b352ab5b Iustin Pop
  # improving the logical/physical id handling
3929 b352ab5b Iustin Pop
  for disk in instance.disks:
3930 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
3931 b352ab5b Iustin Pop
3932 a8083063 Iustin Pop
  return disks_ok, device_info
3933 a8083063 Iustin Pop
3934 a8083063 Iustin Pop
3935 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
3936 3ecf6786 Iustin Pop
  """Start the disks of an instance.
3937 3ecf6786 Iustin Pop

3938 3ecf6786 Iustin Pop
  """
3939 7c4d6c7b Michael Hanselmann
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
3940 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
3941 fe7b0351 Michael Hanselmann
  if not disks_ok:
3942 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
3943 fe7b0351 Michael Hanselmann
    if force is not None and not force:
3944 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
3945 86d9d3bb Iustin Pop
                         " secondary node,"
3946 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
3947 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
3948 fe7b0351 Michael Hanselmann
3949 fe7b0351 Michael Hanselmann
3950 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
3951 a8083063 Iustin Pop
  """Shutdown an instance's disks.
3952 a8083063 Iustin Pop

3953 a8083063 Iustin Pop
  """
3954 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3955 f22a8ba3 Guido Trotter
  REQ_BGL = False
3956 f22a8ba3 Guido Trotter
3957 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3958 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3959 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3960 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3961 f22a8ba3 Guido Trotter
3962 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3963 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3964 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3965 a8083063 Iustin Pop
3966 a8083063 Iustin Pop
  def CheckPrereq(self):
3967 a8083063 Iustin Pop
    """Check prerequisites.
3968 a8083063 Iustin Pop

3969 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3970 a8083063 Iustin Pop

3971 a8083063 Iustin Pop
    """
3972 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3973 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3974 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3975 a8083063 Iustin Pop
3976 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3977 a8083063 Iustin Pop
    """Deactivate the disks
3978 a8083063 Iustin Pop

3979 a8083063 Iustin Pop
    """
3980 a8083063 Iustin Pop
    instance = self.instance
3981 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
3982 a8083063 Iustin Pop
3983 a8083063 Iustin Pop
3984 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
3985 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
3986 155d6c75 Guido Trotter

3987 155d6c75 Guido Trotter
  This function checks that the instance is not running before calling
3988 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
3989 155d6c75 Guido Trotter

3990 155d6c75 Guido Trotter
  """
3991 31624382 Iustin Pop
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
3992 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
3993 a8083063 Iustin Pop
3994 a8083063 Iustin Pop
3995 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
3996 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
3997 a8083063 Iustin Pop

3998 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
3999 a8083063 Iustin Pop

4000 a8083063 Iustin Pop
  If ignore_primary is false, errors on the primary node are not
4001 a8083063 Iustin Pop
  ignored.
4002 a8083063 Iustin Pop

4003 a8083063 Iustin Pop
  """
4004 cacfd1fd Iustin Pop
  all_result = True
4005 a8083063 Iustin Pop
  for disk in instance.disks:
4006 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
4007 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
4008 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
4009 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4010 cacfd1fd Iustin Pop
      if msg:
4011 cacfd1fd Iustin Pop
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
4012 cacfd1fd Iustin Pop
                      disk.iv_name, node, msg)
4013 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
4014 cacfd1fd Iustin Pop
          all_result = False
4015 cacfd1fd Iustin Pop
  return all_result
4016 a8083063 Iustin Pop
4017 a8083063 Iustin Pop
4018 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
4019 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
4020 d4f16fd9 Iustin Pop

4021 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
4022 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
4023 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
4024 d4f16fd9 Iustin Pop
  exception.
4025 d4f16fd9 Iustin Pop

4026 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
4027 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
4028 e69d05fd Iustin Pop
  @type node: C{str}
4029 e69d05fd Iustin Pop
  @param node: the node to check
4030 e69d05fd Iustin Pop
  @type reason: C{str}
4031 e69d05fd Iustin Pop
  @param reason: string to use in the error message
4032 e69d05fd Iustin Pop
  @type requested: C{int}
4033 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
4034 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
4035 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
4036 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
4037 e69d05fd Iustin Pop
      we cannot check the node
4038 d4f16fd9 Iustin Pop

4039 d4f16fd9 Iustin Pop
  """
4040 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
4041 045dd6d9 Iustin Pop
  nodeinfo[node].Raise("Can't get data from node %s" % node,
4042 045dd6d9 Iustin Pop
                       prereq=True, ecode=errors.ECODE_ENVIRON)
4043 070e998b Iustin Pop
  free_mem = nodeinfo[node].payload.get('memory_free', None)
4044 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
4045 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
4046 5c983ee5 Iustin Pop
                               " was '%s'" % (node, free_mem),
4047 5c983ee5 Iustin Pop
                               errors.ECODE_ENVIRON)
4048 d4f16fd9 Iustin Pop
  if requested > free_mem:
4049 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
4050 070e998b Iustin Pop
                               " needed %s MiB, available %s MiB" %
4051 5c983ee5 Iustin Pop
                               (node, reason, requested, free_mem),
4052 5c983ee5 Iustin Pop
                               errors.ECODE_NORES)
4053 d4f16fd9 Iustin Pop
4054 d4f16fd9 Iustin Pop
4055 701384a9 Iustin Pop
def _CheckNodesFreeDisk(lu, nodenames, requested):
4056 701384a9 Iustin Pop
  """Checks if nodes have enough free disk space in the default VG.
4057 701384a9 Iustin Pop

4058 701384a9 Iustin Pop
  This function checks if all given nodes have the needed amount of
4059 701384a9 Iustin Pop
  free disk. In case any node has less disk or we cannot get the
4060 701384a9 Iustin Pop
  information from the node, this function raises an OpPrereqError
4061 701384a9 Iustin Pop
  exception.
4062 701384a9 Iustin Pop

4063 701384a9 Iustin Pop
  @type lu: C{LogicalUnit}
4064 701384a9 Iustin Pop
  @param lu: a logical unit from which we get configuration data
4065 701384a9 Iustin Pop
  @type nodenames: C{list}
4066 3a488770 Iustin Pop
  @param nodenames: the list of node names to check
4067 701384a9 Iustin Pop
  @type requested: C{int}
4068 701384a9 Iustin Pop
  @param requested: the amount of disk in MiB to check for
4069 701384a9 Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough disk, or
4070 701384a9 Iustin Pop
      we cannot check the node
4071 701384a9 Iustin Pop

4072 701384a9 Iustin Pop
  """
4073 701384a9 Iustin Pop
  nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
4074 701384a9 Iustin Pop
                                   lu.cfg.GetHypervisorType())
4075 701384a9 Iustin Pop
  for node in nodenames:
4076 701384a9 Iustin Pop
    info = nodeinfo[node]
4077 701384a9 Iustin Pop
    info.Raise("Cannot get current information from node %s" % node,
4078 701384a9 Iustin Pop
               prereq=True, ecode=errors.ECODE_ENVIRON)
4079 701384a9 Iustin Pop
    vg_free = info.payload.get("vg_free", None)
4080 701384a9 Iustin Pop
    if not isinstance(vg_free, int):
4081 701384a9 Iustin Pop
      raise errors.OpPrereqError("Can't compute free disk space on node %s,"
4082 701384a9 Iustin Pop
                                 " result was '%s'" % (node, vg_free),
4083 701384a9 Iustin Pop
                                 errors.ECODE_ENVIRON)
4084 701384a9 Iustin Pop
    if requested > vg_free:
4085 701384a9 Iustin Pop
      raise errors.OpPrereqError("Not enough disk space on target node %s:"
4086 701384a9 Iustin Pop
                                 " required %d MiB, available %d MiB" %
4087 701384a9 Iustin Pop
                                 (node, requested, vg_free),
4088 701384a9 Iustin Pop
                                 errors.ECODE_NORES)
4089 701384a9 Iustin Pop
4090 701384a9 Iustin Pop
4091 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
4092 a8083063 Iustin Pop
  """Starts an instance.
4093 a8083063 Iustin Pop

4094 a8083063 Iustin Pop
  """
4095 a8083063 Iustin Pop
  HPATH = "instance-start"
4096 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4097 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
4098 e873317a Guido Trotter
  REQ_BGL = False
4099 e873317a Guido Trotter
4100 e873317a Guido Trotter
  def ExpandNames(self):
4101 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4102 a8083063 Iustin Pop
4103 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4104 a8083063 Iustin Pop
    """Build hooks env.
4105 a8083063 Iustin Pop

4106 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4107 a8083063 Iustin Pop

4108 a8083063 Iustin Pop
    """
4109 a8083063 Iustin Pop
    env = {
4110 a8083063 Iustin Pop
      "FORCE": self.op.force,
4111 a8083063 Iustin Pop
      }
4112 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4113 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4114 a8083063 Iustin Pop
    return env, nl, nl
4115 a8083063 Iustin Pop
4116 a8083063 Iustin Pop
  def CheckPrereq(self):
4117 a8083063 Iustin Pop
    """Check prerequisites.
4118 a8083063 Iustin Pop

4119 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4120 a8083063 Iustin Pop

4121 a8083063 Iustin Pop
    """
4122 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4123 e873317a Guido Trotter
    assert self.instance is not None, \
4124 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4125 a8083063 Iustin Pop
4126 d04aaa2f Iustin Pop
    # extra beparams
4127 d04aaa2f Iustin Pop
    self.beparams = getattr(self.op, "beparams", {})
4128 d04aaa2f Iustin Pop
    if self.beparams:
4129 d04aaa2f Iustin Pop
      if not isinstance(self.beparams, dict):
4130 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
4131 5c983ee5 Iustin Pop
                                   " dict" % (type(self.beparams), ),
4132 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
4133 d04aaa2f Iustin Pop
      # fill the beparams dict
4134 d04aaa2f Iustin Pop
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
4135 d04aaa2f Iustin Pop
      self.op.beparams = self.beparams
4136 d04aaa2f Iustin Pop
4137 d04aaa2f Iustin Pop
    # extra hvparams
4138 d04aaa2f Iustin Pop
    self.hvparams = getattr(self.op, "hvparams", {})
4139 d04aaa2f Iustin Pop
    if self.hvparams:
4140 d04aaa2f Iustin Pop
      if not isinstance(self.hvparams, dict):
4141 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
4142 5c983ee5 Iustin Pop
                                   " dict" % (type(self.hvparams), ),
4143 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
4144 d04aaa2f Iustin Pop
4145 d04aaa2f Iustin Pop
      # check hypervisor parameter syntax (locally)
4146 d04aaa2f Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4147 d04aaa2f Iustin Pop
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
4148 abe609b2 Guido Trotter
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
4149 d04aaa2f Iustin Pop
                                    instance.hvparams)
4150 d04aaa2f Iustin Pop
      filled_hvp.update(self.hvparams)
4151 d04aaa2f Iustin Pop
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
4152 d04aaa2f Iustin Pop
      hv_type.CheckParameterSyntax(filled_hvp)
4153 d04aaa2f Iustin Pop
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
4154 d04aaa2f Iustin Pop
      self.op.hvparams = self.hvparams
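      # Layering illustration (hypothetical values): with cluster defaults
      # {"root_path": "/dev/xvda1", "kernel_args": "ro"}, instance-level
      # {"root_path": "/dev/xvda2"} and a one-off self.hvparams of
      # {"kernel_args": "single"}, filled_hvp becomes
      # {"root_path": "/dev/xvda2", "kernel_args": "single"}.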
4155 d04aaa2f Iustin Pop
4156 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4157 7527a8a4 Iustin Pop
4158 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4159 5bbd3f7f Michael Hanselmann
    # check that the instance's bridges exist
4160 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
4161 a8083063 Iustin Pop
4162 f1926756 Guido Trotter
    remote_info = self.rpc.call_instance_info(instance.primary_node,
4163 f1926756 Guido Trotter
                                              instance.name,
4164 f1926756 Guido Trotter
                                              instance.hypervisor)
4165 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
4166 045dd6d9 Iustin Pop
                      prereq=True, ecode=errors.ECODE_ENVIRON)
4167 7ad1af4a Iustin Pop
    if not remote_info.payload: # not running already
4168 f1926756 Guido Trotter
      _CheckNodeFreeMemory(self, instance.primary_node,
4169 f1926756 Guido Trotter
                           "starting instance %s" % instance.name,
4170 f1926756 Guido Trotter
                           bep[constants.BE_MEMORY], instance.hypervisor)
4171 d4f16fd9 Iustin Pop
4172 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4173 a8083063 Iustin Pop
    """Start the instance.
4174 a8083063 Iustin Pop

4175 a8083063 Iustin Pop
    """
4176 a8083063 Iustin Pop
    instance = self.instance
4177 a8083063 Iustin Pop
    force = self.op.force
4178 a8083063 Iustin Pop
4179 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
4180 fe482621 Iustin Pop
4181 a8083063 Iustin Pop
    node_current = instance.primary_node
4182 a8083063 Iustin Pop
4183 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
4184 a8083063 Iustin Pop
4185 d04aaa2f Iustin Pop
    result = self.rpc.call_instance_start(node_current, instance,
4186 d04aaa2f Iustin Pop
                                          self.hvparams, self.beparams)
4187 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4188 dd279568 Iustin Pop
    if msg:
4189 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
4190 dd279568 Iustin Pop
      raise errors.OpExecError("Could not start instance: %s" % msg)
4191 a8083063 Iustin Pop
4192 a8083063 Iustin Pop
4193 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
4194 bf6929a2 Alexander Schreiber
  """Reboot an instance.
4195 bf6929a2 Alexander Schreiber

4196 bf6929a2 Alexander Schreiber
  """
4197 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
4198 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
4199 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
4200 e873317a Guido Trotter
  REQ_BGL = False
4201 e873317a Guido Trotter
4202 17c3f802 Guido Trotter
  def CheckArguments(self):
4203 17c3f802 Guido Trotter
    """Check the arguments.
4204 17c3f802 Guido Trotter

4205 17c3f802 Guido Trotter
    """
4206 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4207 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4208 17c3f802 Guido Trotter
4209 e873317a Guido Trotter
  def ExpandNames(self):
4210 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
4211 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
4212 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
4213 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
4214 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
4215 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
4216 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
4217 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4218 bf6929a2 Alexander Schreiber
4219 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
4220 bf6929a2 Alexander Schreiber
    """Build hooks env.
4221 bf6929a2 Alexander Schreiber

4222 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
4223 bf6929a2 Alexander Schreiber

4224 bf6929a2 Alexander Schreiber
    """
4225 bf6929a2 Alexander Schreiber
    env = {
4226 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
4227 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
4228 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4229 bf6929a2 Alexander Schreiber
      }
4230 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4231 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4232 bf6929a2 Alexander Schreiber
    return env, nl, nl
4233 bf6929a2 Alexander Schreiber
4234 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
4235 bf6929a2 Alexander Schreiber
    """Check prerequisites.
4236 bf6929a2 Alexander Schreiber

4237 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
4238 bf6929a2 Alexander Schreiber

4239 bf6929a2 Alexander Schreiber
    """
4240 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4241 e873317a Guido Trotter
    assert self.instance is not None, \
4242 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4243 bf6929a2 Alexander Schreiber
4244 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4245 7527a8a4 Iustin Pop
4246 5bbd3f7f Michael Hanselmann
    # check that the instance's bridges exist
4247 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
4248 bf6929a2 Alexander Schreiber
4249 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
4250 bf6929a2 Alexander Schreiber
    """Reboot the instance.
4251 bf6929a2 Alexander Schreiber

4252 bf6929a2 Alexander Schreiber
    """
4253 bf6929a2 Alexander Schreiber
    instance = self.instance
4254 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
4255 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
4256 bf6929a2 Alexander Schreiber
4257 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
4258 bf6929a2 Alexander Schreiber
4259 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
4260 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
4261 ae48ac32 Iustin Pop
      for disk in instance.disks:
4262 ae48ac32 Iustin Pop
        self.cfg.SetDiskID(disk, node_current)
4263 781de953 Iustin Pop
      result = self.rpc.call_instance_reboot(node_current, instance,
4264 17c3f802 Guido Trotter
                                             reboot_type,
4265 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4266 4c4e4e1e Iustin Pop
      result.Raise("Could not reboot instance")
4267 bf6929a2 Alexander Schreiber
    else:
4268 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(node_current, instance,
4269 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
4270 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance for full reboot")
4271 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
4272 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
4273 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(node_current, instance, None, None)
4274 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4275 dd279568 Iustin Pop
      if msg:
4276 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4277 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance for"
4278 dd279568 Iustin Pop
                                 " full reboot: %s" % msg)
4279 bf6929a2 Alexander Schreiber
4280 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
4281 bf6929a2 Alexander Schreiber
4282 bf6929a2 Alexander Schreiber
4283 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
4284 a8083063 Iustin Pop
  """Shutdown an instance.
4285 a8083063 Iustin Pop

4286 a8083063 Iustin Pop
  """
4287 a8083063 Iustin Pop
  HPATH = "instance-stop"
4288 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4289 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4290 e873317a Guido Trotter
  REQ_BGL = False
4291 e873317a Guido Trotter
4292 6263189c Guido Trotter
  def CheckArguments(self):
4293 6263189c Guido Trotter
    """Check the arguments.
4294 6263189c Guido Trotter

4295 6263189c Guido Trotter
    """
4296 6263189c Guido Trotter
    self.timeout = getattr(self.op, "timeout",
4297 6263189c Guido Trotter
                           constants.DEFAULT_SHUTDOWN_TIMEOUT)
4298 6263189c Guido Trotter
4299 e873317a Guido Trotter
  def ExpandNames(self):
4300 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4301 a8083063 Iustin Pop
4302 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4303 a8083063 Iustin Pop
    """Build hooks env.
4304 a8083063 Iustin Pop

4305 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4306 a8083063 Iustin Pop

4307 a8083063 Iustin Pop
    """
4308 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4309 6263189c Guido Trotter
    env["TIMEOUT"] = self.timeout
4310 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4311 a8083063 Iustin Pop
    return env, nl, nl
4312 a8083063 Iustin Pop
4313 a8083063 Iustin Pop
  def CheckPrereq(self):
4314 a8083063 Iustin Pop
    """Check prerequisites.
4315 a8083063 Iustin Pop

4316 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4317 a8083063 Iustin Pop

4318 a8083063 Iustin Pop
    """
4319 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4320 e873317a Guido Trotter
    assert self.instance is not None, \
4321 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4322 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
4323 a8083063 Iustin Pop
4324 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4325 a8083063 Iustin Pop
    """Shutdown the instance.
4326 a8083063 Iustin Pop

4327 a8083063 Iustin Pop
    """
4328 a8083063 Iustin Pop
    instance = self.instance
4329 a8083063 Iustin Pop
    node_current = instance.primary_node
4330 6263189c Guido Trotter
    timeout = self.timeout
4331 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
4332 6263189c Guido Trotter
    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
4333 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4334 1fae010f Iustin Pop
    if msg:
4335 1fae010f Iustin Pop
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
4336 a8083063 Iustin Pop
4337 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
4338 a8083063 Iustin Pop
4339 a8083063 Iustin Pop
4340 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
4341 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
4342 fe7b0351 Michael Hanselmann

4343 fe7b0351 Michael Hanselmann
  """
4344 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
4345 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
4346 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
4347 4e0b4d2d Guido Trotter
  REQ_BGL = False
4348 4e0b4d2d Guido Trotter
4349 4e0b4d2d Guido Trotter
  def ExpandNames(self):
4350 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
4351 fe7b0351 Michael Hanselmann
4352 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
4353 fe7b0351 Michael Hanselmann
    """Build hooks env.
4354 fe7b0351 Michael Hanselmann

4355 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
4356 fe7b0351 Michael Hanselmann

4357 fe7b0351 Michael Hanselmann
    """
4358 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4359 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4360 fe7b0351 Michael Hanselmann
    return env, nl, nl
4361 fe7b0351 Michael Hanselmann
4362 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
4363 fe7b0351 Michael Hanselmann
    """Check prerequisites.
4364 fe7b0351 Michael Hanselmann

4365 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
4366 fe7b0351 Michael Hanselmann

4367 fe7b0351 Michael Hanselmann
    """
4368 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4369 4e0b4d2d Guido Trotter
    assert instance is not None, \
4370 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4371 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4372 4e0b4d2d Guido Trotter
4373 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
4374 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4375 5c983ee5 Iustin Pop
                                 self.op.instance_name,
4376 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
4377 31624382 Iustin Pop
    _CheckInstanceDown(self, instance, "cannot reinstall")
4378 d0834de3 Michael Hanselmann
4379 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
4380 f2c05717 Guido Trotter
    self.op.force_variant = getattr(self.op, "force_variant", False)
4381 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
4382 d0834de3 Michael Hanselmann
      # OS verification
4383 cf26a87a Iustin Pop
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
4384 231cd901 Iustin Pop
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
4385 d0834de3 Michael Hanselmann
4386 fe7b0351 Michael Hanselmann
    self.instance = instance
4387 fe7b0351 Michael Hanselmann
4388 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
4389 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
4390 fe7b0351 Michael Hanselmann

4391 fe7b0351 Michael Hanselmann
    """
4392 fe7b0351 Michael Hanselmann
    inst = self.instance
4393 fe7b0351 Michael Hanselmann
4394 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
4395 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
4396 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
4397 a4eae71f Michael Hanselmann
      self.cfg.Update(inst, feedback_fn)
4398 d0834de3 Michael Hanselmann
4399 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
4400 fe7b0351 Michael Hanselmann
    try:
4401 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
4402 4a0e011f Iustin Pop
      # FIXME: pass debug option from opcode to backend
4403 dd713605 Iustin Pop
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
4404 dd713605 Iustin Pop
                                             self.op.debug_level)
4405 4c4e4e1e Iustin Pop
      result.Raise("Could not install OS for instance %s on node %s" %
4406 4c4e4e1e Iustin Pop
                   (inst.name, inst.primary_node))
4407 fe7b0351 Michael Hanselmann
    finally:
4408 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
4409 fe7b0351 Michael Hanselmann
4410 fe7b0351 Michael Hanselmann
4411 bd315bfa Iustin Pop
class LURecreateInstanceDisks(LogicalUnit):
4412 bd315bfa Iustin Pop
  """Recreate an instance's missing disks.
4413 bd315bfa Iustin Pop

4414 bd315bfa Iustin Pop
  """
4415 bd315bfa Iustin Pop
  HPATH = "instance-recreate-disks"
4416 bd315bfa Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4417 bd315bfa Iustin Pop
  _OP_REQP = ["instance_name", "disks"]
4418 bd315bfa Iustin Pop
  REQ_BGL = False
4419 bd315bfa Iustin Pop
4420 bd315bfa Iustin Pop
  def CheckArguments(self):
4421 bd315bfa Iustin Pop
    """Check the arguments.
4422 bd315bfa Iustin Pop

4423 bd315bfa Iustin Pop
    """
4424 bd315bfa Iustin Pop
    if not isinstance(self.op.disks, list):
4425 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid disks parameter", errors.ECODE_INVAL)
4426 bd315bfa Iustin Pop
    for item in self.op.disks:
4427 bd315bfa Iustin Pop
      if (not isinstance(item, int) or
4428 bd315bfa Iustin Pop
          item < 0):
4429 bd315bfa Iustin Pop
        raise errors.OpPrereqError("Invalid disk specification '%s'" %
4430 5c983ee5 Iustin Pop
                                   str(item), errors.ECODE_INVAL)
4431 bd315bfa Iustin Pop
4432 bd315bfa Iustin Pop
  def ExpandNames(self):
4433 bd315bfa Iustin Pop
    self._ExpandAndLockInstance()
4434 bd315bfa Iustin Pop
4435 bd315bfa Iustin Pop
  def BuildHooksEnv(self):
4436 bd315bfa Iustin Pop
    """Build hooks env.
4437 bd315bfa Iustin Pop

4438 bd315bfa Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4439 bd315bfa Iustin Pop

4440 bd315bfa Iustin Pop
    """
4441 bd315bfa Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4442 bd315bfa Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4443 bd315bfa Iustin Pop
    return env, nl, nl
4444 bd315bfa Iustin Pop
4445 bd315bfa Iustin Pop
  def CheckPrereq(self):
4446 bd315bfa Iustin Pop
    """Check prerequisites.
4447 bd315bfa Iustin Pop

4448 bd315bfa Iustin Pop
    This checks that the instance is in the cluster and is not running.
4449 bd315bfa Iustin Pop

4450 bd315bfa Iustin Pop
    """
4451 bd315bfa Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4452 bd315bfa Iustin Pop
    assert instance is not None, \
4453 bd315bfa Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
4454 bd315bfa Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4455 bd315bfa Iustin Pop
4456 bd315bfa Iustin Pop
    if instance.disk_template == constants.DT_DISKLESS:
4457 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4458 5c983ee5 Iustin Pop
                                 self.op.instance_name, errors.ECODE_INVAL)
4459 31624382 Iustin Pop
    _CheckInstanceDown(self, instance, "cannot recreate disks")
4460 bd315bfa Iustin Pop
4461 bd315bfa Iustin Pop
    if not self.op.disks:
4462 bd315bfa Iustin Pop
      self.op.disks = range(len(instance.disks))
4463 bd315bfa Iustin Pop
    else:
4464 bd315bfa Iustin Pop
      for idx in self.op.disks:
4465 bd315bfa Iustin Pop
        if idx >= len(instance.disks):
4466 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
4467 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
4468 bd315bfa Iustin Pop
4469 bd315bfa Iustin Pop
    self.instance = instance
4470 bd315bfa Iustin Pop
4471 bd315bfa Iustin Pop
  def Exec(self, feedback_fn):
4472 bd315bfa Iustin Pop
    """Recreate the disks.
4473 bd315bfa Iustin Pop

4474 bd315bfa Iustin Pop
    """
4475 bd315bfa Iustin Pop
    to_skip = []
4476 1122eb25 Iustin Pop
    for idx, _ in enumerate(self.instance.disks):
4477 bd315bfa Iustin Pop
      if idx not in self.op.disks: # disk idx has not been passed in
4478 bd315bfa Iustin Pop
        to_skip.append(idx)
4479 bd315bfa Iustin Pop
        continue
4480 bd315bfa Iustin Pop
4481 bd315bfa Iustin Pop
    _CreateDisks(self, self.instance, to_skip=to_skip)
4482 bd315bfa Iustin Pop
4483 bd315bfa Iustin Pop
4484 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
4485 decd5f45 Iustin Pop
  """Rename an instance.
4486 decd5f45 Iustin Pop

4487 decd5f45 Iustin Pop
  """
4488 decd5f45 Iustin Pop
  HPATH = "instance-rename"
4489 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4490 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
4491 decd5f45 Iustin Pop
4492 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
4493 decd5f45 Iustin Pop
    """Build hooks env.
4494 decd5f45 Iustin Pop

4495 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4496 decd5f45 Iustin Pop

4497 decd5f45 Iustin Pop
    """
4498 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4499 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
4500 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4501 decd5f45 Iustin Pop
    return env, nl, nl
4502 decd5f45 Iustin Pop
4503 decd5f45 Iustin Pop
  def CheckPrereq(self):
4504 decd5f45 Iustin Pop
    """Check prerequisites.
4505 decd5f45 Iustin Pop

4506 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
4507 decd5f45 Iustin Pop

4508 decd5f45 Iustin Pop
    """
4509 cf26a87a Iustin Pop
    self.op.instance_name = _ExpandInstanceName(self.cfg,
4510 cf26a87a Iustin Pop
                                                self.op.instance_name)
4511 cf26a87a Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4512 cf26a87a Iustin Pop
    assert instance is not None
4513 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4514 31624382 Iustin Pop
    _CheckInstanceDown(self, instance, "cannot rename")
4515 decd5f45 Iustin Pop
    self.instance = instance
4516 decd5f45 Iustin Pop
4517 decd5f45 Iustin Pop
    # new name verification
4518 104f4ca1 Iustin Pop
    name_info = utils.GetHostInfo(self.op.new_name)
4519 decd5f45 Iustin Pop
4520 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
4521 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
4522 7bde3275 Guido Trotter
    if new_name in instance_list:
4523 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4524 5c983ee5 Iustin Pop
                                 new_name, errors.ECODE_EXISTS)
4525 7bde3275 Guido Trotter
4526 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
4527 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
4528 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4529 5c983ee5 Iustin Pop
                                   (name_info.ip, new_name),
4530 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
4531 decd5f45 Iustin Pop
4532 decd5f45 Iustin Pop
4533 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
4534 decd5f45 Iustin Pop
    """Reinstall the instance.
4535 decd5f45 Iustin Pop

4536 decd5f45 Iustin Pop
    """
4537 decd5f45 Iustin Pop
    inst = self.instance
4538 decd5f45 Iustin Pop
    old_name = inst.name
4539 decd5f45 Iustin Pop
4540 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
4541 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4542 b23c4333 Manuel Franceschini
4543 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
4544 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
4545 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
4546 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
4547 decd5f45 Iustin Pop
4548 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
4549 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
4550 decd5f45 Iustin Pop
4551 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
4552 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4553 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
4554 72737a7f Iustin Pop
                                                     old_file_storage_dir,
4555 72737a7f Iustin Pop
                                                     new_file_storage_dir)
4556 4c4e4e1e Iustin Pop
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
4557 4c4e4e1e Iustin Pop
                   " (but the instance has been renamed in Ganeti)" %
4558 4c4e4e1e Iustin Pop
                   (inst.primary_node, old_file_storage_dir,
4559 4c4e4e1e Iustin Pop
                    new_file_storage_dir))
4560 b23c4333 Manuel Franceschini
4561 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
4562 decd5f45 Iustin Pop
    try:
4563 781de953 Iustin Pop
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
4564 dd713605 Iustin Pop
                                                 old_name, self.op.debug_level)
4565 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4566 96841384 Iustin Pop
      if msg:
4567 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
4568 96841384 Iustin Pop
               " (but the instance has been renamed in Ganeti): %s" %
4569 96841384 Iustin Pop
               (inst.name, inst.primary_node, msg))
4570 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
4571 decd5f45 Iustin Pop
    finally:
4572 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
4573 decd5f45 Iustin Pop
4574 decd5f45 Iustin Pop
4575 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
4576 a8083063 Iustin Pop
  """Remove an instance.
4577 a8083063 Iustin Pop

4578 a8083063 Iustin Pop
  """
4579 a8083063 Iustin Pop
  HPATH = "instance-remove"
4580 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4581 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
4582 cf472233 Guido Trotter
  REQ_BGL = False
4583 cf472233 Guido Trotter
4584 17c3f802 Guido Trotter
  def CheckArguments(self):
4585 17c3f802 Guido Trotter
    """Check the arguments.
4586 17c3f802 Guido Trotter

4587 17c3f802 Guido Trotter
    """
4588 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4589 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4590 17c3f802 Guido Trotter
4591 cf472233 Guido Trotter
  def ExpandNames(self):
4592 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
4593 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4594 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4595 cf472233 Guido Trotter
4596 cf472233 Guido Trotter
  def DeclareLocks(self, level):
4597 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
4598 cf472233 Guido Trotter
      self._LockInstancesNodes()
4599 a8083063 Iustin Pop
4600 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4601 a8083063 Iustin Pop
    """Build hooks env.
4602 a8083063 Iustin Pop

4603 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4604 a8083063 Iustin Pop

4605 a8083063 Iustin Pop
    """
4606 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4607 17c3f802 Guido Trotter
    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
4608 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
4609 abd8e836 Iustin Pop
    nl_post = list(self.instance.all_nodes) + nl
4610 abd8e836 Iustin Pop
    return env, nl, nl_post
4611 a8083063 Iustin Pop
4612 a8083063 Iustin Pop
  def CheckPrereq(self):
4613 a8083063 Iustin Pop
    """Check prerequisites.
4614 a8083063 Iustin Pop

4615 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4616 a8083063 Iustin Pop

4617 a8083063 Iustin Pop
    """
4618 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4619 cf472233 Guido Trotter
    assert self.instance is not None, \
4620 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4621 a8083063 Iustin Pop
4622 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4623 a8083063 Iustin Pop
    """Remove the instance.
4624 a8083063 Iustin Pop

4625 a8083063 Iustin Pop
    """
4626 a8083063 Iustin Pop
    instance = self.instance
4627 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
4628 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
4629 a8083063 Iustin Pop
4630 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
4631 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4632 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4633 1fae010f Iustin Pop
    if msg:
4634 1d67656e Iustin Pop
      if self.op.ignore_failures:
4635 1fae010f Iustin Pop
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
4636 1d67656e Iustin Pop
      else:
4637 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4638 1fae010f Iustin Pop
                                 " node %s: %s" %
4639 1fae010f Iustin Pop
                                 (instance.name, instance.primary_node, msg))
4640 a8083063 Iustin Pop
4641 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
4642 a8083063 Iustin Pop
4643 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
4644 1d67656e Iustin Pop
      if self.op.ignore_failures:
4645 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
4646 1d67656e Iustin Pop
      else:
4647 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
4648 a8083063 Iustin Pop
4649 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
4650 a8083063 Iustin Pop
4651 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
4652 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
4653 a8083063 Iustin Pop
4654 a8083063 Iustin Pop
4655 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
4656 a8083063 Iustin Pop
  """Logical unit for querying instances.
4657 a8083063 Iustin Pop

4658 a8083063 Iustin Pop
  """
4659 7260cfbe Iustin Pop
  # pylint: disable-msg=W0142
4660 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
4661 7eb9d8f7 Guido Trotter
  REQ_BGL = False
4662 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
4663 19bed813 Iustin Pop
                    "serial_no", "ctime", "mtime", "uuid"]
4664 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
4665 5b460366 Iustin Pop
                                    "admin_state",
4666 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
4667 638c6349 Guido Trotter
                                    "nic_mode", "nic_link",
4668 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
4669 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
4670 8aec325c Iustin Pop
                                    r"(disk)\.(size)/([0-9]+)",
4671 8aec325c Iustin Pop
                                    r"(disk)\.(sizes)", "disk_usage",
4672 638c6349 Guido Trotter
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
4673 638c6349 Guido Trotter
                                    r"(nic)\.(bridge)/([0-9]+)",
4674 638c6349 Guido Trotter
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
4675 8aec325c Iustin Pop
                                    r"(disk|nic)\.(count)",
4676 19bed813 Iustin Pop
                                    "hvparams",
4677 19bed813 Iustin Pop
                                    ] + _SIMPLE_FIELDS +
4678 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
4679 7736a5f2 Iustin Pop
                                   for name in constants.HVS_PARAMETERS
4680 7736a5f2 Iustin Pop
                                   if name not in constants.HVC_GLOBALS] +
4681 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
4682 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
4683 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
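  # Examples of field names accepted by the patterns above (illustrative):
  # "disk.size/0", "disk.sizes", "disk.count", "nic.mac/1", "nic.modes",
  # "hv/kernel_path", "be/memory".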
4684 31bf511f Iustin Pop
4685 a8083063 Iustin Pop
4686 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
4687 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
4688 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
4689 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
4690 a8083063 Iustin Pop
4691 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
4692 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
4693 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
4694 7eb9d8f7 Guido Trotter
4695 57a2fb91 Iustin Pop
    if self.op.names:
4696 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
4697 7eb9d8f7 Guido Trotter
    else:
4698 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
4699 7eb9d8f7 Guido Trotter
4700 ec79568d Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
4701 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
4702 57a2fb91 Iustin Pop
    if self.do_locking:
4703 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4704 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
4705 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4706 7eb9d8f7 Guido Trotter
4707 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
4708 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
4709 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
4710 7eb9d8f7 Guido Trotter
4711 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
4712 7eb9d8f7 Guido Trotter
    """Check prerequisites.
4713 7eb9d8f7 Guido Trotter

4714 7eb9d8f7 Guido Trotter
    """
4715 57a2fb91 Iustin Pop
    pass
4716 069dcc86 Iustin Pop
4717 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4718 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
4719 a8083063 Iustin Pop

4720 a8083063 Iustin Pop
    """
4721 7260cfbe Iustin Pop
    # pylint: disable-msg=R0912
4722 7260cfbe Iustin Pop
    # way too many branches here
4723 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
4724 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
4725 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
4726 a7f5dc98 Iustin Pop
      if self.do_locking:
4727 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4728 a7f5dc98 Iustin Pop
      else:
4729 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
4730 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
4731 57a2fb91 Iustin Pop
    else:
4732 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
4733 a7f5dc98 Iustin Pop
      if self.do_locking:
4734 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
4735 a7f5dc98 Iustin Pop
      else:
4736 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
4737 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
4738 a7f5dc98 Iustin Pop
      if missing:
4739 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
4740 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
4741 a7f5dc98 Iustin Pop
      instance_names = self.wanted
4742 c1f1cbb2 Iustin Pop
4743 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
4744 a8083063 Iustin Pop
4745 a8083063 Iustin Pop
    # begin data gathering
4746 a8083063 Iustin Pop
4747 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
4748 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
4749 a8083063 Iustin Pop
4750 a8083063 Iustin Pop
    bad_nodes = []
4751 cbfc4681 Iustin Pop
    off_nodes = []
4752 ec79568d Iustin Pop
    if self.do_node_query:
4753 a8083063 Iustin Pop
      live_data = {}
4754 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
4755 a8083063 Iustin Pop
      for name in nodes:
4756 a8083063 Iustin Pop
        result = node_data[name]
4757 cbfc4681 Iustin Pop
        if result.offline:
4758 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
4759 cbfc4681 Iustin Pop
          off_nodes.append(name)
4760 3cebe102 Michael Hanselmann
        if result.fail_msg:
4761 a8083063 Iustin Pop
          bad_nodes.append(name)
4762 781de953 Iustin Pop
        else:
4763 2fa74ef4 Iustin Pop
          if result.payload:
4764 2fa74ef4 Iustin Pop
            live_data.update(result.payload)
4765 2fa74ef4 Iustin Pop
          # else no instance is alive
4766 a8083063 Iustin Pop
    else:
4767 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
4768 a8083063 Iustin Pop
4769 a8083063 Iustin Pop
    # end data gathering
4770 a8083063 Iustin Pop
4771 5018a335 Iustin Pop
    HVPREFIX = "hv/"
4772 338e51e8 Iustin Pop
    BEPREFIX = "be/"
4773 a8083063 Iustin Pop
    output = []
4774 638c6349 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
4775 a8083063 Iustin Pop
    for instance in instance_list:
4776 a8083063 Iustin Pop
      iout = []
4777 7736a5f2 Iustin Pop
      i_hv = cluster.FillHV(instance, skip_globals=True)
4778 638c6349 Guido Trotter
      i_be = cluster.FillBE(instance)
4779 638c6349 Guido Trotter
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4780 638c6349 Guido Trotter
                                 nic.nicparams) for nic in instance.nics]
4781 a8083063 Iustin Pop
      for field in self.op.output_fields:
4782 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
4783 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
4784 19bed813 Iustin Pop
          val = getattr(instance, field)
4785 a8083063 Iustin Pop
        elif field == "pnode":
4786 a8083063 Iustin Pop
          val = instance.primary_node
4787 a8083063 Iustin Pop
        elif field == "snodes":
4788 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
4789 a8083063 Iustin Pop
        elif field == "admin_state":
4790 0d68c45d Iustin Pop
          val = instance.admin_up
4791 a8083063 Iustin Pop
        elif field == "oper_state":
4792 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4793 8a23d2d3 Iustin Pop
            val = None
4794 a8083063 Iustin Pop
          else:
4795 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
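        # The "status" branch below combines the live (hypervisor) state
        # with the configured admin state into one of: "running", "ERROR_up"
        # (running although administratively down), "ERROR_down" (stopped
        # although administratively up), "ADMIN_down", "ERROR_nodedown" or
        # "ERROR_nodeoffline" when the primary node could not be queried.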
4796 d8052456 Iustin Pop
        elif field == "status":
4797 cbfc4681 Iustin Pop
          if instance.primary_node in off_nodes:
4798 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
4799 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
4800 d8052456 Iustin Pop
            val = "ERROR_nodedown"
4801 d8052456 Iustin Pop
          else:
4802 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
4803 d8052456 Iustin Pop
            if running:
4804 0d68c45d Iustin Pop
              if instance.admin_up:
4805 d8052456 Iustin Pop
                val = "running"
4806 d8052456 Iustin Pop
              else:
4807 d8052456 Iustin Pop
                val = "ERROR_up"
4808 d8052456 Iustin Pop
            else:
4809 0d68c45d Iustin Pop
              if instance.admin_up:
4810 d8052456 Iustin Pop
                val = "ERROR_down"
4811 d8052456 Iustin Pop
              else:
4812 d8052456 Iustin Pop
                val = "ADMIN_down"
4813 a8083063 Iustin Pop
        elif field == "oper_ram":
4814 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4815 8a23d2d3 Iustin Pop
            val = None
4816 a8083063 Iustin Pop
          elif instance.name in live_data:
4817 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
4818 a8083063 Iustin Pop
          else:
4819 a8083063 Iustin Pop
            val = "-"
4820 c1ce76bb Iustin Pop
        elif field == "vcpus":
4821 c1ce76bb Iustin Pop
          val = i_be[constants.BE_VCPUS]
4822 a8083063 Iustin Pop
        elif field == "disk_template":
4823 a8083063 Iustin Pop
          val = instance.disk_template
4824 a8083063 Iustin Pop
        elif field == "ip":
4825 39a02558 Guido Trotter
          if instance.nics:
4826 39a02558 Guido Trotter
            val = instance.nics[0].ip
4827 39a02558 Guido Trotter
          else:
4828 39a02558 Guido Trotter
            val = None
4829 638c6349 Guido Trotter
        elif field == "nic_mode":
4830 638c6349 Guido Trotter
          if instance.nics:
4831 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_MODE]
4832 638c6349 Guido Trotter
          else:
4833 638c6349 Guido Trotter
            val = None
4834 638c6349 Guido Trotter
        elif field == "nic_link":
4835 39a02558 Guido Trotter
          if instance.nics:
4836 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4837 638c6349 Guido Trotter
          else:
4838 638c6349 Guido Trotter
            val = None
4839 638c6349 Guido Trotter
        elif field == "bridge":
4840 638c6349 Guido Trotter
          if (instance.nics and
4841 638c6349 Guido Trotter
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
4842 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4843 39a02558 Guido Trotter
          else:
4844 39a02558 Guido Trotter
            val = None
4845 a8083063 Iustin Pop
        elif field == "mac":
4846 39a02558 Guido Trotter
          if instance.nics:
4847 39a02558 Guido Trotter
            val = instance.nics[0].mac
4848 39a02558 Guido Trotter
          else:
4849 39a02558 Guido Trotter
            val = None
4850 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
4851 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
4852 ad24e046 Iustin Pop
          try:
4853 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
4854 ad24e046 Iustin Pop
          except errors.OpPrereqError:
4855 8a23d2d3 Iustin Pop
            val = None
4856 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
4857 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
4858 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
4859 130a6a6f Iustin Pop
        elif field == "tags":
4860 130a6a6f Iustin Pop
          val = list(instance.GetTags())
4861 338e51e8 Iustin Pop
        elif field == "hvparams":
4862 338e51e8 Iustin Pop
          val = i_hv
4863 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
4864 7736a5f2 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
4865 7736a5f2 Iustin Pop
              field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
4866 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
4867 338e51e8 Iustin Pop
        elif field == "beparams":
4868 338e51e8 Iustin Pop
          val = i_be
4869 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
4870 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
4871 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
4872 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
4873 71c1af58 Iustin Pop
          # matches a variable list
4874 71c1af58 Iustin Pop
          st_groups = st_match.groups()
4875 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
4876 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4877 71c1af58 Iustin Pop
              val = len(instance.disks)
4878 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
4879 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
4880 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
4881 3e0cea06 Iustin Pop
              try:
4882 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
4883 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
4884 71c1af58 Iustin Pop
                val = None
4885 71c1af58 Iustin Pop
            else:
4886 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
4887 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
4888 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4889 71c1af58 Iustin Pop
              val = len(instance.nics)
4890 41a776da Iustin Pop
            elif st_groups[1] == "macs":
4891 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
4892 41a776da Iustin Pop
            elif st_groups[1] == "ips":
4893 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
4894 638c6349 Guido Trotter
            elif st_groups[1] == "modes":
4895 638c6349 Guido Trotter
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
4896 638c6349 Guido Trotter
            elif st_groups[1] == "links":
4897 638c6349 Guido Trotter
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
4898 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
4899 638c6349 Guido Trotter
              val = []
4900 638c6349 Guido Trotter
              for nicp in i_nicp:
4901 638c6349 Guido Trotter
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
4902 638c6349 Guido Trotter
                  val.append(nicp[constants.NIC_LINK])
4903 638c6349 Guido Trotter
                else:
4904 638c6349 Guido Trotter
                  val.append(None)
4905 71c1af58 Iustin Pop
            else:
4906 71c1af58 Iustin Pop
              # index-based item
4907 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
4908 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
4909 71c1af58 Iustin Pop
                val = None
4910 71c1af58 Iustin Pop
              else:
4911 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
4912 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
4913 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
4914 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
4915 638c6349 Guido Trotter
                elif st_groups[1] == "mode":
4916 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_MODE]
4917 638c6349 Guido Trotter
                elif st_groups[1] == "link":
4918 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_LINK]
4919 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
4920 638c6349 Guido Trotter
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
4921 638c6349 Guido Trotter
                  if nic_mode == constants.NIC_MODE_BRIDGED:
4922 638c6349 Guido Trotter
                    val = i_nicp[nic_idx][constants.NIC_LINK]
4923 638c6349 Guido Trotter
                  else:
4924 638c6349 Guido Trotter
                    val = None
4925 71c1af58 Iustin Pop
                else:
4926 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
4927 71c1af58 Iustin Pop
          else:
4928 c1ce76bb Iustin Pop
            assert False, ("Declared but unhandled variable parameter '%s'" %
4929 c1ce76bb Iustin Pop
                           field)
4930 a8083063 Iustin Pop
        else:
4931 c1ce76bb Iustin Pop
          assert False, "Declared but unhandled parameter '%s'" % field
4932 a8083063 Iustin Pop
        iout.append(val)
4933 a8083063 Iustin Pop
      output.append(iout)
4934 a8083063 Iustin Pop
4935 a8083063 Iustin Pop
    return output
4936 a8083063 Iustin Pop
4937 a8083063 Iustin Pop
4938 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
4939 a8083063 Iustin Pop
  """Failover an instance.
4940 a8083063 Iustin Pop

4941 a8083063 Iustin Pop
  """
4942 a8083063 Iustin Pop
  HPATH = "instance-failover"
4943 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4944 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
4945 c9e5c064 Guido Trotter
  REQ_BGL = False
4946 c9e5c064 Guido Trotter
4947 17c3f802 Guido Trotter
  def CheckArguments(self):
4948 17c3f802 Guido Trotter
    """Check the arguments.
4949 17c3f802 Guido Trotter

4950 17c3f802 Guido Trotter
    """
4951 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4952 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4953 17c3f802 Guido Trotter
4954 c9e5c064 Guido Trotter
  def ExpandNames(self):
4955 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
4956 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4957 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4958 c9e5c064 Guido Trotter
4959 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
4960 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
4961 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
4962 a8083063 Iustin Pop
4963 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4964 a8083063 Iustin Pop
    """Build hooks env.
4965 a8083063 Iustin Pop

4966 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4967 a8083063 Iustin Pop

4968 a8083063 Iustin Pop
    """
4969 08eec276 Iustin Pop
    instance = self.instance
4970 08eec276 Iustin Pop
    source_node = instance.primary_node
4971 08eec276 Iustin Pop
    target_node = instance.secondary_nodes[0]
4972 a8083063 Iustin Pop
    env = {
4973 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
4974 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4975 08eec276 Iustin Pop
      "OLD_PRIMARY": source_node,
4976 08eec276 Iustin Pop
      "OLD_SECONDARY": target_node,
4977 08eec276 Iustin Pop
      "NEW_PRIMARY": target_node,
4978 08eec276 Iustin Pop
      "NEW_SECONDARY": source_node,
4979 a8083063 Iustin Pop
      }
4980 08eec276 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, instance))
4981 08eec276 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
4982 abd8e836 Iustin Pop
    nl_post = list(nl)
4983 abd8e836 Iustin Pop
    nl_post.append(source_node)
4984 abd8e836 Iustin Pop
    return env, nl, nl_post
4985 a8083063 Iustin Pop
4986 a8083063 Iustin Pop
  def CheckPrereq(self):
4987 a8083063 Iustin Pop
    """Check prerequisites.
4988 a8083063 Iustin Pop

4989 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4990 a8083063 Iustin Pop

4991 a8083063 Iustin Pop
    """
4992 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4993 c9e5c064 Guido Trotter
    assert self.instance is not None, \
4994 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4995 a8083063 Iustin Pop
4996 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4997 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
4998 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
4999 5c983ee5 Iustin Pop
                                 " network mirrored, cannot failover.",
5000 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
5001 2a710df1 Michael Hanselmann
5002 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
5003 2a710df1 Michael Hanselmann
    if not secondary_nodes:
5004 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
5005 abdf0113 Iustin Pop
                                   "a mirrored disk template")
5006 2a710df1 Michael Hanselmann
5007 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
5008 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, target_node)
5009 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, target_node)
5010 d27776f0 Iustin Pop
    if instance.admin_up:
5011 d27776f0 Iustin Pop
      # check memory requirements on the secondary node
5012 d27776f0 Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5013 d27776f0 Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
5014 d27776f0 Iustin Pop
                           instance.hypervisor)
5015 d27776f0 Iustin Pop
    else:
5016 d27776f0 Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
5017 d27776f0 Iustin Pop
                   " instance will not be started")
5018 3a7c308e Guido Trotter
5019 a8083063 Iustin Pop
    # check bridge existence
5020 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5021 a8083063 Iustin Pop
5022 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5023 a8083063 Iustin Pop
    """Failover an instance.
5024 a8083063 Iustin Pop

5025 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
5026 a8083063 Iustin Pop
    starting it on the secondary.
5027 a8083063 Iustin Pop

5028 a8083063 Iustin Pop
    """
5029 a8083063 Iustin Pop
    instance = self.instance
5030 a8083063 Iustin Pop
5031 a8083063 Iustin Pop
    source_node = instance.primary_node
5032 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
5033 a8083063 Iustin Pop
5034 1df79ce6 Michael Hanselmann
    if instance.admin_up:
5035 1df79ce6 Michael Hanselmann
      feedback_fn("* checking disk consistency between source and target")
5036 1df79ce6 Michael Hanselmann
      for dev in instance.disks:
5037 1df79ce6 Michael Hanselmann
        # for drbd, these are drbd over lvm
5038 1df79ce6 Michael Hanselmann
        if not _CheckDiskConsistency(self, dev, target_node, False):
5039 1df79ce6 Michael Hanselmann
          if not self.op.ignore_consistency:
5040 1df79ce6 Michael Hanselmann
            raise errors.OpExecError("Disk %s is degraded on target node,"
5041 1df79ce6 Michael Hanselmann
                                     " aborting failover." % dev.iv_name)
5042 1df79ce6 Michael Hanselmann
    else:
5043 1df79ce6 Michael Hanselmann
      feedback_fn("* not checking disk consistency as instance is not running")
5044 a8083063 Iustin Pop
5045 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
5046 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
5047 9a4f63d1 Iustin Pop
                 instance.name, source_node)
5048 a8083063 Iustin Pop
5049 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
5050 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
5051 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5052 1fae010f Iustin Pop
    if msg:
5053 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
5054 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
5055 1fae010f Iustin Pop
                             " Proceeding anyway. Please make sure node"
5056 1fae010f Iustin Pop
                             " %s is down. Error details: %s",
5057 1fae010f Iustin Pop
                             instance.name, source_node, source_node, msg)
5058 24a40d57 Iustin Pop
      else:
5059 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
5060 1fae010f Iustin Pop
                                 " node %s: %s" %
5061 1fae010f Iustin Pop
                                 (instance.name, source_node, msg))
5062 a8083063 Iustin Pop
5063 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
5064 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5065 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
5066 a8083063 Iustin Pop
5067 a8083063 Iustin Pop
    instance.primary_node = target_node
5068 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
5069 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
5070 a8083063 Iustin Pop
5071 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
5072 0d68c45d Iustin Pop
    if instance.admin_up:
5073 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
5074 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
5075 9a4f63d1 Iustin Pop
                   instance.name, target_node)
5076 12a0cfbe Guido Trotter
5077 7c4d6c7b Michael Hanselmann
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
5078 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
5079 12a0cfbe Guido Trotter
      if not disks_ok:
5080 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5081 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
5082 a8083063 Iustin Pop
5083 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
5084 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
5085 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5086 dd279568 Iustin Pop
      if msg:
5087 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5088 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5089 dd279568 Iustin Pop
                                 (instance.name, target_node, msg))
5090 a8083063 Iustin Pop
5091 a8083063 Iustin Pop
5092 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
5093 53c776b5 Iustin Pop
  """Migrate an instance.
5094 53c776b5 Iustin Pop

5095 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
5096 53c776b5 Iustin Pop
  which is done with shutdown.
5097 53c776b5 Iustin Pop

5098 53c776b5 Iustin Pop
  """
5099 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
5100 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5101 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
5102 53c776b5 Iustin Pop
5103 53c776b5 Iustin Pop
  REQ_BGL = False
5104 53c776b5 Iustin Pop
5105 53c776b5 Iustin Pop
  def ExpandNames(self):
5106 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
5107 3e06e001 Michael Hanselmann
5108 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
5109 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5110 53c776b5 Iustin Pop
5111 3e06e001 Michael Hanselmann
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
5112 3e06e001 Michael Hanselmann
                                       self.op.live, self.op.cleanup)
5113 3a012b41 Michael Hanselmann
    self.tasklets = [self._migrater]
5114 3e06e001 Michael Hanselmann
5115 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
5116 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
5117 53c776b5 Iustin Pop
      self._LockInstancesNodes()
5118 53c776b5 Iustin Pop
5119 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
5120 53c776b5 Iustin Pop
    """Build hooks env.
5121 53c776b5 Iustin Pop

5122 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
5123 53c776b5 Iustin Pop

5124 53c776b5 Iustin Pop
    """
5125 3e06e001 Michael Hanselmann
    instance = self._migrater.instance
5126 08eec276 Iustin Pop
    source_node = instance.primary_node
5127 08eec276 Iustin Pop
    target_node = instance.secondary_nodes[0]
5128 3e06e001 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self, instance)
5129 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
5130 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
5131 08eec276 Iustin Pop
    env.update({
5132 08eec276 Iustin Pop
        "OLD_PRIMARY": source_node,
5133 08eec276 Iustin Pop
        "OLD_SECONDARY": target_node,
5134 08eec276 Iustin Pop
        "NEW_PRIMARY": target_node,
5135 08eec276 Iustin Pop
        "NEW_SECONDARY": source_node,
5136 08eec276 Iustin Pop
        })
5137 3e06e001 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5138 abd8e836 Iustin Pop
    nl_post = list(nl)
5139 abd8e836 Iustin Pop
    nl_post.append(source_node)
5140 abd8e836 Iustin Pop
    return env, nl, nl_post
5141 53c776b5 Iustin Pop
5142 3e06e001 Michael Hanselmann
5143 313bcead Iustin Pop
class LUMoveInstance(LogicalUnit):
5144 313bcead Iustin Pop
  """Move an instance by data-copying.
5145 313bcead Iustin Pop

5146 313bcead Iustin Pop
  """
5147 313bcead Iustin Pop
  HPATH = "instance-move"
5148 313bcead Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5149 313bcead Iustin Pop
  _OP_REQP = ["instance_name", "target_node"]
5150 313bcead Iustin Pop
  REQ_BGL = False
5151 313bcead Iustin Pop
5152 17c3f802 Guido Trotter
  def CheckArguments(self):
5153 17c3f802 Guido Trotter
    """Check the arguments.
5154 17c3f802 Guido Trotter

5155 17c3f802 Guido Trotter
    """
5156 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
5157 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
5158 17c3f802 Guido Trotter
5159 313bcead Iustin Pop
  def ExpandNames(self):
5160 313bcead Iustin Pop
    self._ExpandAndLockInstance()
5161 cf26a87a Iustin Pop
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5162 313bcead Iustin Pop
    self.op.target_node = target_node
5163 313bcead Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
5164 313bcead Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5165 313bcead Iustin Pop
5166 313bcead Iustin Pop
  def DeclareLocks(self, level):
5167 313bcead Iustin Pop
    if level == locking.LEVEL_NODE:
5168 313bcead Iustin Pop
      self._LockInstancesNodes(primary_only=True)
5169 313bcead Iustin Pop
5170 313bcead Iustin Pop
  def BuildHooksEnv(self):
5171 313bcead Iustin Pop
    """Build hooks env.
5172 313bcead Iustin Pop

5173 313bcead Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
5174 313bcead Iustin Pop

5175 313bcead Iustin Pop
    """
5176 313bcead Iustin Pop
    env = {
5177 313bcead Iustin Pop
      "TARGET_NODE": self.op.target_node,
5178 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
5179 313bcead Iustin Pop
      }
5180 313bcead Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5181 313bcead Iustin Pop
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
5182 313bcead Iustin Pop
                                       self.op.target_node]
5183 313bcead Iustin Pop
    return env, nl, nl
5184 313bcead Iustin Pop
5185 313bcead Iustin Pop
  def CheckPrereq(self):
5186 313bcead Iustin Pop
    """Check prerequisites.
5187 313bcead Iustin Pop

5188 313bcead Iustin Pop
    This checks that the instance is in the cluster.
5189 313bcead Iustin Pop

5190 313bcead Iustin Pop
    """
5191 313bcead Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5192 313bcead Iustin Pop
    assert self.instance is not None, \
5193 313bcead Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
5194 313bcead Iustin Pop
5195 313bcead Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.target_node)
5196 313bcead Iustin Pop
    assert node is not None, \
5197 313bcead Iustin Pop
      "Cannot retrieve locked node %s" % self.op.target_node
5198 313bcead Iustin Pop
5199 313bcead Iustin Pop
    self.target_node = target_node = node.name
5200 313bcead Iustin Pop
5201 313bcead Iustin Pop
    if target_node == instance.primary_node:
5202 313bcead Iustin Pop
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
5203 5c983ee5 Iustin Pop
                                 (instance.name, target_node),
5204 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
5205 313bcead Iustin Pop
5206 313bcead Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
5207 313bcead Iustin Pop
5208 313bcead Iustin Pop
    for idx, dsk in enumerate(instance.disks):
5209 313bcead Iustin Pop
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
5210 313bcead Iustin Pop
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
5211 d1b83918 Iustin Pop
                                   " cannot copy" % idx, errors.ECODE_STATE)
5212 313bcead Iustin Pop
5213 313bcead Iustin Pop
    _CheckNodeOnline(self, target_node)
5214 313bcead Iustin Pop
    _CheckNodeNotDrained(self, target_node)
5215 313bcead Iustin Pop
5216 313bcead Iustin Pop
    if instance.admin_up:
5217 313bcead Iustin Pop
      # check memory requirements on the target node
5218 313bcead Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5219 313bcead Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
5220 313bcead Iustin Pop
                           instance.hypervisor)
5221 313bcead Iustin Pop
    else:
5222 313bcead Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
5223 313bcead Iustin Pop
                   " instance will not be started")
5224 313bcead Iustin Pop
5225 313bcead Iustin Pop
    # check bridge existence
5226 313bcead Iustin Pop
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5227 313bcead Iustin Pop
5228 313bcead Iustin Pop
  def Exec(self, feedback_fn):
5229 313bcead Iustin Pop
    """Move an instance.
5230 313bcead Iustin Pop

5231 313bcead Iustin Pop
    The move is done by shutting it down on its present node, copying
5232 313bcead Iustin Pop
    the data over (slow) and starting it on the new node.
5233 313bcead Iustin Pop

5234 313bcead Iustin Pop
    """
5235 313bcead Iustin Pop
    instance = self.instance
5236 313bcead Iustin Pop
5237 313bcead Iustin Pop
    source_node = instance.primary_node
5238 313bcead Iustin Pop
    target_node = self.target_node
5239 313bcead Iustin Pop
5240 313bcead Iustin Pop
    self.LogInfo("Shutting down instance %s on source node %s",
5241 313bcead Iustin Pop
                 instance.name, source_node)
5242 313bcead Iustin Pop
5243 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
5244 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
5245 313bcead Iustin Pop
    msg = result.fail_msg
5246 313bcead Iustin Pop
    if msg:
5247 313bcead Iustin Pop
      if self.op.ignore_consistency:
5248 313bcead Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
5249 313bcead Iustin Pop
                             " Proceeding anyway. Please make sure node"
5250 313bcead Iustin Pop
                             " %s is down. Error details: %s",
5251 313bcead Iustin Pop
                             instance.name, source_node, source_node, msg)
5252 313bcead Iustin Pop
      else:
5253 313bcead Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
5254 313bcead Iustin Pop
                                 " node %s: %s" %
5255 313bcead Iustin Pop
                                 (instance.name, source_node, msg))
5256 313bcead Iustin Pop
5257 313bcead Iustin Pop
    # create the target disks
5258 313bcead Iustin Pop
    try:
5259 313bcead Iustin Pop
      _CreateDisks(self, instance, target_node=target_node)
5260 313bcead Iustin Pop
    except errors.OpExecError:
5261 313bcead Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
5262 313bcead Iustin Pop
      try:
5263 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
5264 313bcead Iustin Pop
      finally:
5265 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5266 313bcead Iustin Pop
        raise
5267 313bcead Iustin Pop
5268 313bcead Iustin Pop
    cluster_name = self.cfg.GetClusterInfo().cluster_name
5269 313bcead Iustin Pop
5270 313bcead Iustin Pop
    errs = []
5271 313bcead Iustin Pop
    # activate, get path, copy the data over
5272 313bcead Iustin Pop
    for idx, disk in enumerate(instance.disks):
5273 313bcead Iustin Pop
      self.LogInfo("Copying data for disk %d", idx)
5274 313bcead Iustin Pop
      result = self.rpc.call_blockdev_assemble(target_node, disk,
5275 313bcead Iustin Pop
                                               instance.name, True)
5276 313bcead Iustin Pop
      if result.fail_msg:
5277 313bcead Iustin Pop
        self.LogWarning("Can't assemble newly created disk %d: %s",
5278 313bcead Iustin Pop
                        idx, result.fail_msg)
5279 313bcead Iustin Pop
        errs.append(result.fail_msg)
5280 313bcead Iustin Pop
        break
5281 313bcead Iustin Pop
      dev_path = result.payload
5282 313bcead Iustin Pop
      result = self.rpc.call_blockdev_export(source_node, disk,
5283 313bcead Iustin Pop
                                             target_node, dev_path,
5284 313bcead Iustin Pop
                                             cluster_name)
5285 313bcead Iustin Pop
      if result.fail_msg:
5286 313bcead Iustin Pop
        self.LogWarning("Can't copy data over for disk %d: %s",
5287 313bcead Iustin Pop
                        idx, result.fail_msg)
5288 313bcead Iustin Pop
        errs.append(result.fail_msg)
5289 313bcead Iustin Pop
        break
5290 313bcead Iustin Pop
5291 313bcead Iustin Pop
    if errs:
5292 313bcead Iustin Pop
      self.LogWarning("Some disks failed to copy, aborting")
5293 313bcead Iustin Pop
      try:
5294 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
5295 313bcead Iustin Pop
      finally:
5296 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5297 313bcead Iustin Pop
        raise errors.OpExecError("Errors during disk copy: %s" %
5298 313bcead Iustin Pop
                                 (",".join(errs),))
5299 313bcead Iustin Pop
5300 313bcead Iustin Pop
    instance.primary_node = target_node
5301 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
5302 313bcead Iustin Pop
5303 313bcead Iustin Pop
    self.LogInfo("Removing the disks on the original node")
5304 313bcead Iustin Pop
    _RemoveDisks(self, instance, target_node=source_node)
5305 313bcead Iustin Pop
5306 313bcead Iustin Pop
    # Only start the instance if it's marked as up
5307 313bcead Iustin Pop
    if instance.admin_up:
5308 313bcead Iustin Pop
      self.LogInfo("Starting instance %s on node %s",
5309 313bcead Iustin Pop
                   instance.name, target_node)
5310 313bcead Iustin Pop
5311 313bcead Iustin Pop
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
5312 313bcead Iustin Pop
                                           ignore_secondaries=True)
5313 313bcead Iustin Pop
      if not disks_ok:
5314 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5315 313bcead Iustin Pop
        raise errors.OpExecError("Can't activate the instance's disks")
5316 313bcead Iustin Pop
5317 313bcead Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
5318 313bcead Iustin Pop
      msg = result.fail_msg
5319 313bcead Iustin Pop
      if msg:
5320 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5321 313bcead Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5322 313bcead Iustin Pop
                                 (instance.name, target_node, msg))
5323 313bcead Iustin Pop
5324 313bcead Iustin Pop
5325 80cb875c Michael Hanselmann
class LUMigrateNode(LogicalUnit):
5326 80cb875c Michael Hanselmann
  """Migrate all instances from a node.
5327 80cb875c Michael Hanselmann

5328 80cb875c Michael Hanselmann
  """
5329 80cb875c Michael Hanselmann
  HPATH = "node-migrate"
5330 80cb875c Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
5331 80cb875c Michael Hanselmann
  _OP_REQP = ["node_name", "live"]
5332 80cb875c Michael Hanselmann
  REQ_BGL = False
5333 80cb875c Michael Hanselmann
5334 80cb875c Michael Hanselmann
  def ExpandNames(self):
5335 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5336 80cb875c Michael Hanselmann
5337 80cb875c Michael Hanselmann
    self.needed_locks = {
5338 80cb875c Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
5339 80cb875c Michael Hanselmann
      }
5340 80cb875c Michael Hanselmann
5341 80cb875c Michael Hanselmann
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5342 80cb875c Michael Hanselmann
5343 80cb875c Michael Hanselmann
    # Create tasklets for migrating instances for all instances on this node
5344 80cb875c Michael Hanselmann
    names = []
5345 80cb875c Michael Hanselmann
    tasklets = []
5346 80cb875c Michael Hanselmann
5347 80cb875c Michael Hanselmann
    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
5348 80cb875c Michael Hanselmann
      logging.debug("Migrating instance %s", inst.name)
5349 80cb875c Michael Hanselmann
      names.append(inst.name)
5350 80cb875c Michael Hanselmann
5351 80cb875c Michael Hanselmann
      tasklets.append(TLMigrateInstance(self, inst.name, self.op.live, False))
5352 80cb875c Michael Hanselmann
5353 80cb875c Michael Hanselmann
    self.tasklets = tasklets
5354 80cb875c Michael Hanselmann
5355 80cb875c Michael Hanselmann
    # Declare instance locks
5356 80cb875c Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = names
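    # Note: one TLMigrateInstance tasklet has been created above for every
    # instance that has this node as its primary; they all share the
    # opcode's "live" flag and run with cleanup=False, so migrating a node
    # is simply a batch of single-instance migrations.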
5357 80cb875c Michael Hanselmann
5358 80cb875c Michael Hanselmann
  def DeclareLocks(self, level):
5359 80cb875c Michael Hanselmann
    if level == locking.LEVEL_NODE:
5360 80cb875c Michael Hanselmann
      self._LockInstancesNodes()
5361 80cb875c Michael Hanselmann
5362 80cb875c Michael Hanselmann
  def BuildHooksEnv(self):
5363 80cb875c Michael Hanselmann
    """Build hooks env.
5364 80cb875c Michael Hanselmann

5365 80cb875c Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
5366 80cb875c Michael Hanselmann

5367 80cb875c Michael Hanselmann
    """
5368 80cb875c Michael Hanselmann
    env = {
5369 80cb875c Michael Hanselmann
      "NODE_NAME": self.op.node_name,
5370 80cb875c Michael Hanselmann
      }
5371 80cb875c Michael Hanselmann
5372 80cb875c Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
5373 80cb875c Michael Hanselmann
5374 80cb875c Michael Hanselmann
    return (env, nl, nl)
5375 80cb875c Michael Hanselmann
5376 80cb875c Michael Hanselmann
5377 3e06e001 Michael Hanselmann
class TLMigrateInstance(Tasklet):
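  # Shared migration logic: this tasklet is instantiated both by
  # LUMigrateInstance (one instance, optionally in cleanup mode) and by
  # LUMigrateNode (one tasklet per primary instance of a node), so the
  # actual migration steps live here rather than in the logical units.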
5378 3e06e001 Michael Hanselmann
  def __init__(self, lu, instance_name, live, cleanup):
5379 3e06e001 Michael Hanselmann
    """Initializes this class.
5380 3e06e001 Michael Hanselmann

5381 3e06e001 Michael Hanselmann
    """
5382 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
5383 464243a7 Michael Hanselmann
5384 3e06e001 Michael Hanselmann
    # Parameters
5385 3e06e001 Michael Hanselmann
    self.instance_name = instance_name
5386 3e06e001 Michael Hanselmann
    self.live = live
5387 3e06e001 Michael Hanselmann
    self.cleanup = cleanup
5388 3e06e001 Michael Hanselmann
5389 53c776b5 Iustin Pop
  def CheckPrereq(self):
5390 53c776b5 Iustin Pop
    """Check prerequisites.
5391 53c776b5 Iustin Pop

5392 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
5393 53c776b5 Iustin Pop

5394 53c776b5 Iustin Pop
    """
5395 cf26a87a Iustin Pop
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
5396 cf26a87a Iustin Pop
    instance = self.cfg.GetInstanceInfo(instance_name)
5397 cf26a87a Iustin Pop
    assert instance is not None
5398 53c776b5 Iustin Pop
5399 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
5400 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
5401 5c983ee5 Iustin Pop
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)
5402 53c776b5 Iustin Pop
5403 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
5404 53c776b5 Iustin Pop
    if not secondary_nodes:
5405 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
5406 733a2b6a Iustin Pop
                                      " drbd8 disk template")
5407 53c776b5 Iustin Pop
5408 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
5409 53c776b5 Iustin Pop
5410 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
5411 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
5412 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
5413 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
5414 53c776b5 Iustin Pop
                         instance.hypervisor)
5415 53c776b5 Iustin Pop
5416 53c776b5 Iustin Pop
    # check bridge existence
5417 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5418 53c776b5 Iustin Pop
5419 3e06e001 Michael Hanselmann
    if not self.cleanup:
5420 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
5421 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
5422 53c776b5 Iustin Pop
                                                 instance)
5423 045dd6d9 Iustin Pop
      result.Raise("Can't migrate, please use failover",
5424 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_STATE)
5425 53c776b5 Iustin Pop
5426 53c776b5 Iustin Pop
    self.instance = instance
5427 53c776b5 Iustin Pop
5428 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
5429 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
5430 53c776b5 Iustin Pop

5431 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
5432 53c776b5 Iustin Pop

5433 53c776b5 Iustin Pop
    """
5434 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
5435 53c776b5 Iustin Pop
    all_done = False
5436 53c776b5 Iustin Pop
    while not all_done:
5437 53c776b5 Iustin Pop
      all_done = True
5438 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
5439 53c776b5 Iustin Pop
                                            self.nodes_ip,
5440 53c776b5 Iustin Pop
                                            self.instance.disks)
5441 53c776b5 Iustin Pop
      min_percent = 100
5442 53c776b5 Iustin Pop
      for node, nres in result.items():
5443 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
5444 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
5445 53c776b5 Iustin Pop
        all_done = all_done and node_done
5446 53c776b5 Iustin Pop
        if node_percent is not None:
5447 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
5448 53c776b5 Iustin Pop
      if not all_done:
5449 53c776b5 Iustin Pop
        if min_percent < 100:
5450 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
5451 53c776b5 Iustin Pop
        time.sleep(2)
5452 53c776b5 Iustin Pop
5453 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
5454 53c776b5 Iustin Pop
    """Demote a node to secondary.
5455 53c776b5 Iustin Pop

5456 53c776b5 Iustin Pop
    """
5457 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
5458 53c776b5 Iustin Pop
5459 53c776b5 Iustin Pop
    for dev in self.instance.disks:
5460 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
5461 53c776b5 Iustin Pop
5462 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
5463 53c776b5 Iustin Pop
                                          self.instance.disks)
5464 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
5465 53c776b5 Iustin Pop
5466 53c776b5 Iustin Pop
  def _GoStandalone(self):
5467 53c776b5 Iustin Pop
    """Disconnect from the network.
5468 53c776b5 Iustin Pop

5469 53c776b5 Iustin Pop
    """
5470 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
5471 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
5472 53c776b5 Iustin Pop
                                               self.instance.disks)
5473 53c776b5 Iustin Pop
    for node, nres in result.items():
5474 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
5475 53c776b5 Iustin Pop
5476 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
5477 53c776b5 Iustin Pop
    """Reconnect to the network.
5478 53c776b5 Iustin Pop

5479 53c776b5 Iustin Pop
    """
5480 53c776b5 Iustin Pop
    if multimaster:
5481 53c776b5 Iustin Pop
      msg = "dual-master"
5482 53c776b5 Iustin Pop
    else:
5483 53c776b5 Iustin Pop
      msg = "single-master"
5484 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
5485 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
5486 53c776b5 Iustin Pop
                                           self.instance.disks,
5487 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
5488 53c776b5 Iustin Pop
    for node, nres in result.items():
5489 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
5490 53c776b5 Iustin Pop
5491 53c776b5 Iustin Pop
  def _ExecCleanup(self):
5492 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
5493 53c776b5 Iustin Pop

5494 53c776b5 Iustin Pop
    The cleanup is done by:
5495 53c776b5 Iustin Pop
      - check that the instance is running only on one node
5496 53c776b5 Iustin Pop
        (and update the config if needed)
5497 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
5498 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
5499 53c776b5 Iustin Pop
      - disconnect from the network
5500 53c776b5 Iustin Pop
      - change disks into single-master mode
5501 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
5502 53c776b5 Iustin Pop

5503 53c776b5 Iustin Pop
    """
5504 53c776b5 Iustin Pop
    instance = self.instance
5505 53c776b5 Iustin Pop
    target_node = self.target_node
5506 53c776b5 Iustin Pop
    source_node = self.source_node
5507 53c776b5 Iustin Pop
5508 53c776b5 Iustin Pop
    # check running on only one node
5509 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
5510 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
5511 53c776b5 Iustin Pop
                     " a bad state)")
5512 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
5513 53c776b5 Iustin Pop
    for node, result in ins_l.items():
5514 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
5515 53c776b5 Iustin Pop
5516 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
5517 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
5518 53c776b5 Iustin Pop
5519 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
5520 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
5521 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
5522 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
5523 53c776b5 Iustin Pop
                               " and restart this operation.")
5524 53c776b5 Iustin Pop
5525 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
5526 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
5527 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
5528 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
5529 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
5530 53c776b5 Iustin Pop
5531 53c776b5 Iustin Pop
    if runningon_target:
5532 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
5533 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
5534 53c776b5 Iustin Pop
                       " updating config" % target_node)
5535 53c776b5 Iustin Pop
      instance.primary_node = target_node
5536 a4eae71f Michael Hanselmann
      self.cfg.Update(instance, self.feedback_fn)
5537 53c776b5 Iustin Pop
      demoted_node = source_node
5538 53c776b5 Iustin Pop
    else:
5539 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
5540 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
5541 53c776b5 Iustin Pop
      demoted_node = target_node
5542 53c776b5 Iustin Pop
5543 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
5544 53c776b5 Iustin Pop
    try:
5545 53c776b5 Iustin Pop
      self._WaitUntilSync()
5546 53c776b5 Iustin Pop
    except errors.OpExecError:
5547 53c776b5 Iustin Pop
      # we ignore errors here, since if the device is standalone, it
5548 53c776b5 Iustin Pop
      # won't be able to sync
5549 53c776b5 Iustin Pop
      pass
5550 53c776b5 Iustin Pop
    self._GoStandalone()
5551 53c776b5 Iustin Pop
    self._GoReconnect(False)
5552 53c776b5 Iustin Pop
    self._WaitUntilSync()
5553 53c776b5 Iustin Pop
5554 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5555 53c776b5 Iustin Pop
5556 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
5557 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
5558 6906a9d8 Guido Trotter

5559 6906a9d8 Guido Trotter
    """
5560 6906a9d8 Guido Trotter
    target_node = self.target_node
5561 6906a9d8 Guido Trotter
    try:
5562 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
5563 6906a9d8 Guido Trotter
      self._GoStandalone()
5564 6906a9d8 Guido Trotter
      self._GoReconnect(False)
5565 6906a9d8 Guido Trotter
      self._WaitUntilSync()
5566 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
5567 3e06e001 Michael Hanselmann
      self.lu.LogWarning("Migration failed and I can't reconnect the"
5568 3e06e001 Michael Hanselmann
                         " drives: error '%s'\n"
5569 3e06e001 Michael Hanselmann
                         "Please look and recover the instance status" %
5570 3e06e001 Michael Hanselmann
                         str(err))
5571 6906a9d8 Guido Trotter
5572 6906a9d8 Guido Trotter
  def _AbortMigration(self):
5573 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
5574 6906a9d8 Guido Trotter

5575 6906a9d8 Guido Trotter
    """
5576 6906a9d8 Guido Trotter
    instance = self.instance
5577 6906a9d8 Guido Trotter
    target_node = self.target_node
5578 6906a9d8 Guido Trotter
    migration_info = self.migration_info
5579 6906a9d8 Guido Trotter
5580 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
5581 6906a9d8 Guido Trotter
                                                    instance,
5582 6906a9d8 Guido Trotter
                                                    migration_info,
5583 6906a9d8 Guido Trotter
                                                    False)
5584 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
5585 6906a9d8 Guido Trotter
    if abort_msg:
5586 099c52ad Iustin Pop
      logging.error("Aborting migration failed on target node %s: %s",
5587 099c52ad Iustin Pop
                    target_node, abort_msg)
5588 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we still have to try to revert the
5589 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
5590 6906a9d8 Guido Trotter
5591 53c776b5 Iustin Pop
  def _ExecMigration(self):
5592 53c776b5 Iustin Pop
    """Migrate an instance.
5593 53c776b5 Iustin Pop

5594 53c776b5 Iustin Pop
    The migrate is done by:
5595 53c776b5 Iustin Pop
      - change the disks into dual-master mode
5596 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
5597 53c776b5 Iustin Pop
      - migrate the instance
5598 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
5599 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
5600 53c776b5 Iustin Pop
      - change disks into single-master mode
5601 53c776b5 Iustin Pop

5602 53c776b5 Iustin Pop
    """
5603 53c776b5 Iustin Pop
    instance = self.instance
5604 53c776b5 Iustin Pop
    target_node = self.target_node
5605 53c776b5 Iustin Pop
    source_node = self.source_node
5606 53c776b5 Iustin Pop
5607 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
5608 53c776b5 Iustin Pop
    for dev in instance.disks:
5609 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
5610 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
5611 53c776b5 Iustin Pop
                                 " synchronized on target node,"
5612 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
5613 53c776b5 Iustin Pop
5614 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
5615 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
5616 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5617 6906a9d8 Guido Trotter
    if msg:
5618 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
5619 0959c824 Iustin Pop
                 (source_node, msg))
5620 6906a9d8 Guido Trotter
      logging.error(log_err)
5621 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
5622 6906a9d8 Guido Trotter
5623 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
5624 6906a9d8 Guido Trotter
5625 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
5626 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
5627 53c776b5 Iustin Pop
    self._GoStandalone()
5628 53c776b5 Iustin Pop
    self._GoReconnect(True)
5629 53c776b5 Iustin Pop
    self._WaitUntilSync()
5630 53c776b5 Iustin Pop
5631 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
5632 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
5633 6906a9d8 Guido Trotter
                                           instance,
5634 6906a9d8 Guido Trotter
                                           migration_info,
5635 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
5636 6906a9d8 Guido Trotter
5637 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5638 6906a9d8 Guido Trotter
    if msg:
5639 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
5640 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
5641 78212a5d Iustin Pop
      self.feedback_fn("Pre-migration failed, aborting")
5642 6906a9d8 Guido Trotter
      self._AbortMigration()
5643 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5644 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
5645 6906a9d8 Guido Trotter
                               (instance.name, msg))
5646 6906a9d8 Guido Trotter
5647 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
5648 53c776b5 Iustin Pop
    time.sleep(10)
5649 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
5650 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
5651 3e06e001 Michael Hanselmann
                                            self.live)
5652 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5653 53c776b5 Iustin Pop
    if msg:
5654 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
5655 53c776b5 Iustin Pop
                    " disk status: %s", msg)
5656 78212a5d Iustin Pop
      self.feedback_fn("Migration failed, aborting")
5657 6906a9d8 Guido Trotter
      self._AbortMigration()
5658 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5659 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
5660 53c776b5 Iustin Pop
                               (instance.name, msg))
5661 53c776b5 Iustin Pop
    time.sleep(10)
5662 53c776b5 Iustin Pop
5663 53c776b5 Iustin Pop
    instance.primary_node = target_node
5664 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
5665 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, self.feedback_fn)
5666 53c776b5 Iustin Pop
5667 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
5668 6906a9d8 Guido Trotter
                                              instance,
5669 6906a9d8 Guido Trotter
                                              migration_info,
5670 6906a9d8 Guido Trotter
                                              True)
5671 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5672 6906a9d8 Guido Trotter
    if msg:
5673 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
5674 099c52ad Iustin Pop
                    " %s", msg)
5675 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
5676 6906a9d8 Guido Trotter
                               msg)
5677 6906a9d8 Guido Trotter
5678 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
5679 53c776b5 Iustin Pop
    self._WaitUntilSync()
5680 53c776b5 Iustin Pop
    self._GoStandalone()
5681 53c776b5 Iustin Pop
    self._GoReconnect(False)
5682 53c776b5 Iustin Pop
    self._WaitUntilSync()
5683 53c776b5 Iustin Pop
5684 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5685 53c776b5 Iustin Pop
5686 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
5687 53c776b5 Iustin Pop
    """Perform the migration.
5688 53c776b5 Iustin Pop

5689 53c776b5 Iustin Pop
    """
5690 80cb875c Michael Hanselmann
    feedback_fn("Migrating instance %s" % self.instance.name)
5691 80cb875c Michael Hanselmann
5692 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
5693 53c776b5 Iustin Pop
5694 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
5695 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
5696 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
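    # Note: these are the nodes' secondary (replication) addresses; the
    # disks are replicated over this network and it is also the address
    # passed to the hypervisor for the migration itself.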
5697 53c776b5 Iustin Pop
    self.nodes_ip = {
5698 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
5699 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
5700 53c776b5 Iustin Pop
      }
5701 3e06e001 Michael Hanselmann
5702 3e06e001 Michael Hanselmann
    if self.cleanup:
5703 53c776b5 Iustin Pop
      return self._ExecCleanup()
5704 53c776b5 Iustin Pop
    else:
5705 53c776b5 Iustin Pop
      return self._ExecMigration()
5706 53c776b5 Iustin Pop
5707 53c776b5 Iustin Pop
5708 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
5709 428958aa Iustin Pop
                    info, force_open):
5710 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
5711 a8083063 Iustin Pop

5712 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
5713 a8083063 Iustin Pop
  all its children.
5714 a8083063 Iustin Pop

5715 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
5716 a8083063 Iustin Pop

5717 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
5718 428958aa Iustin Pop
  @param node: the node on which to create the device
5719 428958aa Iustin Pop
  @type instance: L{objects.Instance}
5720 428958aa Iustin Pop
  @param instance: the instance which owns the device
5721 428958aa Iustin Pop
  @type device: L{objects.Disk}
5722 428958aa Iustin Pop
  @param device: the device to create
5723 428958aa Iustin Pop
  @type force_create: boolean
5724 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
5725 428958aa Iustin Pop
      will be changed to True whenever we find a device which has
5726 428958aa Iustin Pop
      the CreateOnSecondary() attribute
5727 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5728 428958aa Iustin Pop
      (this will be represented as a LVM tag)
5729 428958aa Iustin Pop
  @type force_open: boolean
5730 428958aa Iustin Pop
  @param force_open: this parameter will be passed to the
5731 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5732 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
5733 428958aa Iustin Pop
      the child assembly and the device's own Open() execution
5734 428958aa Iustin Pop

5735 a8083063 Iustin Pop
  """
5736 a8083063 Iustin Pop
  if device.CreateOnSecondary():
5737 428958aa Iustin Pop
    force_create = True
5738 796cab27 Iustin Pop
5739 a8083063 Iustin Pop
  if device.children:
5740 a8083063 Iustin Pop
    for child in device.children:
5741 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
5742 428958aa Iustin Pop
                      info, force_open)
5743 a8083063 Iustin Pop
5744 428958aa Iustin Pop
  if not force_create:
5745 796cab27 Iustin Pop
    return
5746 796cab27 Iustin Pop
5747 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
5748 de12473a Iustin Pop
5749 de12473a Iustin Pop
5750 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
5751 de12473a Iustin Pop
  """Create a single block device on a given node.
5752 de12473a Iustin Pop

5753 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
5754 de12473a Iustin Pop
  created in advance.
5755 de12473a Iustin Pop

5756 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
5757 de12473a Iustin Pop
  @param node: the node on which to create the device
5758 de12473a Iustin Pop
  @type instance: L{objects.Instance}
5759 de12473a Iustin Pop
  @param instance: the instance which owns the device
5760 de12473a Iustin Pop
  @type device: L{objects.Disk}
5761 de12473a Iustin Pop
  @param device: the device to create
5762 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5763 de12473a Iustin Pop
      (this will be represented as a LVM tag)
5764 de12473a Iustin Pop
  @type force_open: boolean
5765 de12473a Iustin Pop
  @param force_open: this parameter will be passed to the
5766 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5767 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
5768 de12473a Iustin Pop
      the child assembly and the device's own Open() execution
5769 de12473a Iustin Pop

5770 de12473a Iustin Pop
  """
5771 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
5772 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
5773 428958aa Iustin Pop
                                       instance.name, force_open, info)
5774 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
5775 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
5776 a8083063 Iustin Pop
  if device.physical_id is None:
5777 0959c824 Iustin Pop
    device.physical_id = result.payload
5778 a8083063 Iustin Pop
5779 a8083063 Iustin Pop
5780 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
5781 923b1523 Iustin Pop
  """Generate a suitable LV name.
5782 923b1523 Iustin Pop

5783 923b1523 Iustin Pop
  This will generate logical volume names for the given instance.
5784 923b1523 Iustin Pop

5785 923b1523 Iustin Pop
  """
5786 923b1523 Iustin Pop
  results = []
5787 923b1523 Iustin Pop
  for val in exts:
5788 4fae38c5 Guido Trotter
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
5789 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
5790 923b1523 Iustin Pop
  return results
5791 923b1523 Iustin Pop
5792 923b1523 Iustin Pop
5793 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
5794 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
5795 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
5796 a1f445d3 Iustin Pop

5797 a1f445d3 Iustin Pop
  """
5798 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
5799 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5800 afa1386e Guido Trotter
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
5801 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5802 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
5803 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5804 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
5805 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
5806 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
5807 f9518d38 Iustin Pop
                                      p_minor, s_minor,
5808 f9518d38 Iustin Pop
                                      shared_secret),
5809 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
5810 a1f445d3 Iustin Pop
                          iv_name=iv_name)
5811 a1f445d3 Iustin Pop
  return drbd_dev
5812 a1f445d3 Iustin Pop
5813 7c0d6283 Michael Hanselmann
5814 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
5815 a8083063 Iustin Pop
                          instance_name, primary_node,
5816 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
5817 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
5818 e2a65344 Iustin Pop
                          base_index):
5819 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
5820 a8083063 Iustin Pop

5821 a8083063 Iustin Pop
  """
5822 a8083063 Iustin Pop
  # TODO: compute space requirements
5823 a8083063 Iustin Pop
5824 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5825 08db7c5c Iustin Pop
  disk_count = len(disk_info)
5826 08db7c5c Iustin Pop
  disks = []
5827 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
5828 08db7c5c Iustin Pop
    pass
5829 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
5830 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
5831 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5832 923b1523 Iustin Pop
5833 fb4b324b Guido Trotter
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5834 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
5835 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5836 e2a65344 Iustin Pop
      disk_index = idx + base_index
5837 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
5838 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
5839 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
5840 6ec66eae Iustin Pop
                              mode=disk["mode"])
5841 08db7c5c Iustin Pop
      disks.append(disk_dev)
5842 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
5843 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
5844 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5845 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
5846 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
5847 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
5848 08db7c5c Iustin Pop
5849 e6c1ff2f Iustin Pop
    names = []
5850 fb4b324b Guido Trotter
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5851 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
5852 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
5853 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
5854 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5855 112050d9 Iustin Pop
      disk_index = idx + base_index
5856 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
5857 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
5858 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
5859 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
5860 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
5861 08db7c5c Iustin Pop
      disks.append(disk_dev)
5862 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
5863 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
5864 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
5865 0f1a06e3 Manuel Franceschini
5866 0e3baaf3 Iustin Pop
    _RequireFileStorage()
5867 0e3baaf3 Iustin Pop
5868 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5869 112050d9 Iustin Pop
      disk_index = idx + base_index
5870 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
5871 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
5872 08db7c5c Iustin Pop
                              logical_id=(file_driver,
5873 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
5874 43e99cff Guido Trotter
                                                         disk_index)),
5875 6ec66eae Iustin Pop
                              mode=disk["mode"])
5876 08db7c5c Iustin Pop
      disks.append(disk_dev)
5877 a8083063 Iustin Pop
  else:
5878 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
5879 a8083063 Iustin Pop
  return disks
5880 a8083063 Iustin Pop
5881 a8083063 Iustin Pop
5882 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
5883 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
5884 3ecf6786 Iustin Pop

5885 3ecf6786 Iustin Pop
  """
5886 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
5887 a0c3fea1 Michael Hanselmann
5888 a0c3fea1 Michael Hanselmann
5889 621b7678 Iustin Pop
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
5890 a8083063 Iustin Pop
  """Create all disks for an instance.
5891 a8083063 Iustin Pop

5892 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
5893 a8083063 Iustin Pop

5894 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5895 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5896 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5897 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
5898 bd315bfa Iustin Pop
  @type to_skip: list
5899 bd315bfa Iustin Pop
  @param to_skip: list of indices to skip
5900 621b7678 Iustin Pop
  @type target_node: string
5901 621b7678 Iustin Pop
  @param target_node: if passed, overrides the target node for creation
5902 e4376078 Iustin Pop
  @rtype: None
5903 e4376078 Iustin Pop
  @return: None; failures are signalled by raising exceptions
5904 a8083063 Iustin Pop

5905 a8083063 Iustin Pop
  """
5906 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
5907 621b7678 Iustin Pop
  if target_node is None:
5908 621b7678 Iustin Pop
    pnode = instance.primary_node
5909 621b7678 Iustin Pop
    all_nodes = instance.all_nodes
5910 621b7678 Iustin Pop
  else:
5911 621b7678 Iustin Pop
    pnode = target_node
5912 621b7678 Iustin Pop
    all_nodes = [pnode]
5913 a0c3fea1 Michael Hanselmann
5914 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5915 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5916 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
5917 0f1a06e3 Manuel Franceschini
5918 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
5919 9b4127eb Guido Trotter
                 " node %s" % (file_storage_dir, pnode))
5920 0f1a06e3 Manuel Franceschini
5921 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
5922 24991749 Iustin Pop
  # LUSetInstanceParams
5923 bd315bfa Iustin Pop
  for idx, device in enumerate(instance.disks):
5924 bd315bfa Iustin Pop
    if to_skip and idx in to_skip:
5925 bd315bfa Iustin Pop
      continue
5926 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
5927 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
5928 a8083063 Iustin Pop
    #HARDCODE
5929 621b7678 Iustin Pop
    for node in all_nodes:
5930 428958aa Iustin Pop
      f_create = node == pnode
5931 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
5932 a8083063 Iustin Pop
5933 a8083063 Iustin Pop
5934 621b7678 Iustin Pop
def _RemoveDisks(lu, instance, target_node=None):
5935 a8083063 Iustin Pop
  """Remove all disks for an instance.
5936 a8083063 Iustin Pop

5937 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
5938 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
5939 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
5940 a8083063 Iustin Pop
  with `_CreateDisks()`).
5941 a8083063 Iustin Pop

5942 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5943 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5944 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5945 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
5946 621b7678 Iustin Pop
  @type target_node: string
5947 621b7678 Iustin Pop
  @param target_node: used to override the node on which to remove the disks
5948 e4376078 Iustin Pop
  @rtype: boolean
5949 e4376078 Iustin Pop
  @return: the success of the removal
5950 a8083063 Iustin Pop

5951 a8083063 Iustin Pop
  """
5952 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
5953 a8083063 Iustin Pop
5954 e1bc0878 Iustin Pop
  all_result = True
5955 a8083063 Iustin Pop
  for device in instance.disks:
5956 621b7678 Iustin Pop
    if target_node:
5957 621b7678 Iustin Pop
      edata = [(target_node, device)]
5958 621b7678 Iustin Pop
    else:
5959 621b7678 Iustin Pop
      edata = device.ComputeNodeTree(instance.primary_node)
5960 621b7678 Iustin Pop
    for node, disk in edata:
5961 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
5962 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
5963 e1bc0878 Iustin Pop
      if msg:
5964 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
5965 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
5966 e1bc0878 Iustin Pop
        all_result = False
5967 0f1a06e3 Manuel Franceschini
5968 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5969 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5970 dfc2a24c Guido Trotter
    if target_node:
5971 dfc2a24c Guido Trotter
      tgt = target_node
5972 621b7678 Iustin Pop
    else:
5973 dfc2a24c Guido Trotter
      tgt = instance.primary_node
5974 621b7678 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
5975 621b7678 Iustin Pop
    if result.fail_msg:
5976 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
5977 621b7678 Iustin Pop
                    file_storage_dir, tgt, result.fail_msg)
5978 e1bc0878 Iustin Pop
      all_result = False
5979 0f1a06e3 Manuel Franceschini
5980 e1bc0878 Iustin Pop
  return all_result
5981 a8083063 Iustin Pop
5982 a8083063 Iustin Pop
5983 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
5984 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
5985 e2fe6369 Iustin Pop

5986 e2fe6369 Iustin Pop
  """
5987 e2fe6369 Iustin Pop
  # Required free disk space as a function of the disk template and sizes
5988 e2fe6369 Iustin Pop
  req_size_dict = {
5989 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
5990 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
5991 08db7c5c Iustin Pop
    # 128 MB is added for DRBD metadata for each disk
5992 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
5993 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
5994 e2fe6369 Iustin Pop
  }
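  # Worked example (hypothetical sizes): for two 10240 MB disks this
  # yields 20480 for DT_PLAIN but 20736 for DT_DRBD8, because of the
  # extra 128 MB of DRBD metadata per disk; DT_DISKLESS and DT_FILE
  # have no volume group requirement and map to None.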
5995 e2fe6369 Iustin Pop
5996 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
5997 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
5998 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
5999 e2fe6369 Iustin Pop
6000 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
6001 e2fe6369 Iustin Pop
6002 e2fe6369 Iustin Pop
6003 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
6004 74409b12 Iustin Pop
  """Hypervisor parameter validation.
6005 74409b12 Iustin Pop

6006 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
6007 74409b12 Iustin Pop
  used in both instance create and instance modify.
6008 74409b12 Iustin Pop

6009 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
6010 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
6011 74409b12 Iustin Pop
  @type nodenames: list
6012 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
6013 74409b12 Iustin Pop
  @type hvname: string
6014 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
6015 74409b12 Iustin Pop
  @type hvparams: dict
6016 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
6017 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
6018 74409b12 Iustin Pop

6019 74409b12 Iustin Pop
  """
6020 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
6021 74409b12 Iustin Pop
                                                  hvname,
6022 74409b12 Iustin Pop
                                                  hvparams)
6023 74409b12 Iustin Pop
  for node in nodenames:
6024 781de953 Iustin Pop
    info = hvinfo[node]
6025 68c6f21c Iustin Pop
    if info.offline:
6026 68c6f21c Iustin Pop
      continue
6027 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
6028 74409b12 Iustin Pop
6029 74409b12 Iustin Pop
6030 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
6031 a8083063 Iustin Pop
  """Create an instance.
6032 a8083063 Iustin Pop

6033 a8083063 Iustin Pop
  """
6034 a8083063 Iustin Pop
  HPATH = "instance-add"
6035 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6036 f276c4b5 Iustin Pop
  _OP_REQP = ["instance_name", "disks",
6037 08db7c5c Iustin Pop
              "mode", "start",
6038 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
6039 338e51e8 Iustin Pop
              "hvparams", "beparams"]
6040 7baf741d Guido Trotter
  REQ_BGL = False
6041 7baf741d Guido Trotter
6042 5f23e043 Iustin Pop
  def CheckArguments(self):
6043 5f23e043 Iustin Pop
    """Check arguments.
6044 5f23e043 Iustin Pop

6045 5f23e043 Iustin Pop
    """
6046 df4272e5 Iustin Pop
    # set optional parameters to none if they don't exist
6047 f276c4b5 Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor",
6048 e588764d Iustin Pop
                 "disk_template", "identify_defaults"]:
6049 df4272e5 Iustin Pop
      if not hasattr(self.op, attr):
6050 df4272e5 Iustin Pop
        setattr(self.op, attr, None)
6051 df4272e5 Iustin Pop
6052 5f23e043 Iustin Pop
    # do not require name_check to ease forward/backward compatibility
6053 5f23e043 Iustin Pop
    # for tools
6054 5f23e043 Iustin Pop
    if not hasattr(self.op, "name_check"):
6055 5f23e043 Iustin Pop
      self.op.name_check = True
6056 25a8792c Iustin Pop
    if not hasattr(self.op, "no_install"):
6057 25a8792c Iustin Pop
      self.op.no_install = False
6058 25a8792c Iustin Pop
    if self.op.no_install and self.op.start:
6059 25a8792c Iustin Pop
      self.LogInfo("No-installation mode selected, disabling startup")
6060 25a8792c Iustin Pop
      self.op.start = False
6061 44caf5a8 Iustin Pop
    # validate/normalize the instance name
6062 44caf5a8 Iustin Pop
    self.op.instance_name = utils.HostInfo.NormalizeName(self.op.instance_name)
6063 5f23e043 Iustin Pop
    if self.op.ip_check and not self.op.name_check:
6064 5f23e043 Iustin Pop
      # TODO: make the ip check more flexible and not depend on the name check
6065 5f23e043 Iustin Pop
      raise errors.OpPrereqError("Cannot do ip checks without a name check",
6066 5f23e043 Iustin Pop
                                 errors.ECODE_INVAL)
6067 c3589cf8 Iustin Pop
    # check disk information: either all adopt, or no adopt
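    # Hypothetical examples: a new disk would be given as
    #   {"size": 10240, "mode": "rw"}
    # while an adopted disk also names the existing volume to reuse:
    #   {"size": 10240, "adopt": "existing-volume-name"}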
6068 c3589cf8 Iustin Pop
    has_adopt = has_no_adopt = False
6069 c3589cf8 Iustin Pop
    for disk in self.op.disks:
6070 c3589cf8 Iustin Pop
      if "adopt" in disk:
6071 c3589cf8 Iustin Pop
        has_adopt = True
6072 c3589cf8 Iustin Pop
      else:
6073 c3589cf8 Iustin Pop
        has_no_adopt = True
6074 c3589cf8 Iustin Pop
    if has_adopt and has_no_adopt:
6075 417eabe2 Iustin Pop
      raise errors.OpPrereqError("Either all disks are adopted or none is",
6076 c3589cf8 Iustin Pop
                                 errors.ECODE_INVAL)
6077 c3589cf8 Iustin Pop
    if has_adopt:
6078 c3589cf8 Iustin Pop
      if self.op.disk_template != constants.DT_PLAIN:
6079 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption is only supported for the"
6080 c3589cf8 Iustin Pop
                                   " 'plain' disk template",
6081 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6082 c3589cf8 Iustin Pop
      if self.op.iallocator is not None:
6083 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption not allowed with an"
6084 c3589cf8 Iustin Pop
                                   " iallocator script", errors.ECODE_INVAL)
6085 c3589cf8 Iustin Pop
      if self.op.mode == constants.INSTANCE_IMPORT:
6086 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption not allowed for"
6087 c3589cf8 Iustin Pop
                                   " instance import", errors.ECODE_INVAL)
6088 c3589cf8 Iustin Pop
6089 c3589cf8 Iustin Pop
    self.adopt_disks = has_adopt
6090 5f23e043 Iustin Pop
6091 417eabe2 Iustin Pop
    # verify creation mode
6092 417eabe2 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
6093 417eabe2 Iustin Pop
                            constants.INSTANCE_IMPORT):
6094 417eabe2 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
6095 417eabe2 Iustin Pop
                                 self.op.mode, errors.ECODE_INVAL)
6096 417eabe2 Iustin Pop
6097 417eabe2 Iustin Pop
    # instance name verification
6098 417eabe2 Iustin Pop
    if self.op.name_check:
6099 417eabe2 Iustin Pop
      self.hostname1 = utils.GetHostInfo(self.op.instance_name)
6100 417eabe2 Iustin Pop
      self.op.instance_name = self.hostname1.name
6101 417eabe2 Iustin Pop
      # used in CheckPrereq for ip ping check
6102 417eabe2 Iustin Pop
      self.check_ip = self.hostname1.ip
6103 417eabe2 Iustin Pop
    else:
6104 417eabe2 Iustin Pop
      self.check_ip = None
6105 417eabe2 Iustin Pop
6106 417eabe2 Iustin Pop
    # file storage checks
6107 417eabe2 Iustin Pop
    if (self.op.file_driver and
6108 417eabe2 Iustin Pop
        not self.op.file_driver in constants.FILE_DRIVER):
6109 417eabe2 Iustin Pop
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
6110 417eabe2 Iustin Pop
                                 self.op.file_driver, errors.ECODE_INVAL)
6111 417eabe2 Iustin Pop
6112 417eabe2 Iustin Pop
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
6113 417eabe2 Iustin Pop
      raise errors.OpPrereqError("File storage directory path not absolute",
6114 417eabe2 Iustin Pop
                                 errors.ECODE_INVAL)
6115 417eabe2 Iustin Pop
6116 417eabe2 Iustin Pop
    ### Node/iallocator related checks
6117 417eabe2 Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
6118 417eabe2 Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
6119 417eabe2 Iustin Pop
                                 " node must be given",
6120 417eabe2 Iustin Pop
                                 errors.ECODE_INVAL)
6121 417eabe2 Iustin Pop
6122 417eabe2 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6123 417eabe2 Iustin Pop
      # On import force_variant must be True, because if we forced it at
6124 417eabe2 Iustin Pop
      # initial install, our only chance when importing it back is that it
6125 417eabe2 Iustin Pop
      # works again!
6126 417eabe2 Iustin Pop
      self.op.force_variant = True
6127 417eabe2 Iustin Pop
6128 417eabe2 Iustin Pop
      if self.op.no_install:
6129 417eabe2 Iustin Pop
        self.LogInfo("No-installation mode has no effect during import")
6130 417eabe2 Iustin Pop
6131 417eabe2 Iustin Pop
    else: # INSTANCE_CREATE
6132 417eabe2 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
6133 417eabe2 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified",
6134 417eabe2 Iustin Pop
                                   errors.ECODE_INVAL)
6135 417eabe2 Iustin Pop
      self.op.force_variant = getattr(self.op, "force_variant", False)
6136 f276c4b5 Iustin Pop
      if self.op.disk_template is None:
6137 f276c4b5 Iustin Pop
        raise errors.OpPrereqError("No disk template specified",
6138 f276c4b5 Iustin Pop
                                   errors.ECODE_INVAL)
6139 417eabe2 Iustin Pop
6140 7baf741d Guido Trotter
  def ExpandNames(self):
6141 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
6142 7baf741d Guido Trotter

6143 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
6144 7baf741d Guido Trotter

6145 7baf741d Guido Trotter
    """
6146 7baf741d Guido Trotter
    self.needed_locks = {}
6147 7baf741d Guido Trotter
6148 417eabe2 Iustin Pop
    instance_name = self.op.instance_name
6149 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
6150 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
6151 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
6152 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
6153 5c983ee5 Iustin Pop
                                 instance_name, errors.ECODE_EXISTS)
6154 7baf741d Guido Trotter
6155 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
6156 7baf741d Guido Trotter
6157 7baf741d Guido Trotter
    if self.op.iallocator:
6158 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6159 7baf741d Guido Trotter
    else:
6160 cf26a87a Iustin Pop
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
6161 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
6162 7baf741d Guido Trotter
      if self.op.snode is not None:
6163 cf26a87a Iustin Pop
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
6164 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
6165 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
6166 7baf741d Guido Trotter
6167 7baf741d Guido Trotter
    # in case of import lock the source node too
6168 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
6169 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
6170 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
6171 7baf741d Guido Trotter
6172 b9322a9f Guido Trotter
      if src_path is None:
6173 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
6174 b9322a9f Guido Trotter
6175 b9322a9f Guido Trotter
      if src_node is None:
6176 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6177 b9322a9f Guido Trotter
        self.op.src_node = None
6178 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
6179 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
6180 5c983ee5 Iustin Pop
                                     " path requires a source node option.",
6181 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
6182 b9322a9f Guido Trotter
      else:
6183 cf26a87a Iustin Pop
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
6184 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
6185 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
6186 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
6187 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
6188 c4feafe8 Iustin Pop
            utils.PathJoin(constants.EXPORT_DIR, src_path)
6189 7baf741d Guido Trotter
6190 538475ca Iustin Pop
  def _RunAllocator(self):
6191 538475ca Iustin Pop
    """Run the allocator based on input opcode.
6192 538475ca Iustin Pop

6193 538475ca Iustin Pop
    """
6194 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
6195 923ddac0 Michael Hanselmann
    ial = IAllocator(self.cfg, self.rpc,
6196 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
6197 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
6198 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
6199 d1c2dd75 Iustin Pop
                     tags=[],
6200 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
6201 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
6202 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
6203 08db7c5c Iustin Pop
                     disks=self.disks,
6204 d1c2dd75 Iustin Pop
                     nics=nics,
6205 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
6206 29859cb7 Iustin Pop
                     )
6207 d1c2dd75 Iustin Pop
6208 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
6209 d1c2dd75 Iustin Pop
6210 d1c2dd75 Iustin Pop
    if not ial.success:
6211 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
6212 5c983ee5 Iustin Pop
                                 " iallocator '%s': %s" %
6213 5c983ee5 Iustin Pop
                                 (self.op.iallocator, ial.info),
6214 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
6215 680f0a89 Iustin Pop
    if len(ial.result) != ial.required_nodes:
6216 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6217 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
6218 680f0a89 Iustin Pop
                                 (self.op.iallocator, len(ial.result),
6219 5c983ee5 Iustin Pop
                                  ial.required_nodes), errors.ECODE_FAULT)
6220 680f0a89 Iustin Pop
    self.op.pnode = ial.result[0]
6221 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
6222 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
6223 680f0a89 Iustin Pop
                 utils.CommaJoin(ial.result))
6224 27579978 Iustin Pop
    if ial.required_nodes == 2:
6225 680f0a89 Iustin Pop
      self.op.snode = ial.result[1]
6226 538475ca Iustin Pop
6227 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6228 a8083063 Iustin Pop
    """Build hooks env.
6229 a8083063 Iustin Pop

6230 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
6231 a8083063 Iustin Pop

6232 a8083063 Iustin Pop
    """
6233 a8083063 Iustin Pop
    env = {
6234 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
6235 a8083063 Iustin Pop
      }
6236 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6237 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
6238 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
6239 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
6240 396e1b78 Michael Hanselmann
6241 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
6242 2c2690c9 Iustin Pop
      name=self.op.instance_name,
6243 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
6244 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
6245 4978db17 Iustin Pop
      status=self.op.start,
6246 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
6247 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
6248 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
6249 f9b10246 Guido Trotter
      nics=_NICListToTuple(self, self.nics),
6250 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
6251 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
6252 67fc3042 Iustin Pop
      bep=self.be_full,
6253 67fc3042 Iustin Pop
      hvp=self.hv_full,
6254 3df6e710 Iustin Pop
      hypervisor_name=self.op.hypervisor,
6255 396e1b78 Michael Hanselmann
    ))
6256 a8083063 Iustin Pop
6257 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
6258 a8083063 Iustin Pop
          self.secondaries)
6259 a8083063 Iustin Pop
    return env, nl, nl
6260 a8083063 Iustin Pop
6261 c1c31426 Iustin Pop
  def _ReadExportInfo(self):
6262 c1c31426 Iustin Pop
    """Reads the export information from disk.
6263 c1c31426 Iustin Pop

6264 c1c31426 Iustin Pop
    It will override the opcode source node and path with the actual
6265 c1c31426 Iustin Pop
    information, if these two were not specified before.
6266 c1c31426 Iustin Pop

6267 c1c31426 Iustin Pop
    @return: the export information
6268 c1c31426 Iustin Pop

6269 c1c31426 Iustin Pop
    """
6270 c1c31426 Iustin Pop
    assert self.op.mode == constants.INSTANCE_IMPORT
6271 c1c31426 Iustin Pop
6272 c1c31426 Iustin Pop
    src_node = self.op.src_node
6273 c1c31426 Iustin Pop
    src_path = self.op.src_path
6274 c1c31426 Iustin Pop
6275 c1c31426 Iustin Pop
    if src_node is None:
6276 c1c31426 Iustin Pop
      locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6277 c1c31426 Iustin Pop
      exp_list = self.rpc.call_export_list(locked_nodes)
6278 c1c31426 Iustin Pop
      found = False
6279 c1c31426 Iustin Pop
      for node in exp_list:
6280 c1c31426 Iustin Pop
        if exp_list[node].fail_msg:
6281 c1c31426 Iustin Pop
          continue
6282 c1c31426 Iustin Pop
        if src_path in exp_list[node].payload:
6283 c1c31426 Iustin Pop
          found = True
6284 c1c31426 Iustin Pop
          self.op.src_node = src_node = node
6285 c1c31426 Iustin Pop
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
6286 c1c31426 Iustin Pop
                                                       src_path)
6287 c1c31426 Iustin Pop
          break
6288 c1c31426 Iustin Pop
      if not found:
6289 c1c31426 Iustin Pop
        raise errors.OpPrereqError("No export found for relative path %s" %
6290 c1c31426 Iustin Pop
                                    src_path, errors.ECODE_INVAL)
6291 c1c31426 Iustin Pop
6292 c1c31426 Iustin Pop
    _CheckNodeOnline(self, src_node)
6293 c1c31426 Iustin Pop
    result = self.rpc.call_export_info(src_node, src_path)
6294 c1c31426 Iustin Pop
    result.Raise("No export or invalid export found in dir %s" % src_path)
6295 c1c31426 Iustin Pop
6296 c1c31426 Iustin Pop
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
6297 c1c31426 Iustin Pop
    if not export_info.has_section(constants.INISECT_EXP):
6298 c1c31426 Iustin Pop
      raise errors.ProgrammerError("Corrupted export config",
6299 c1c31426 Iustin Pop
                                   errors.ECODE_ENVIRON)
6300 c1c31426 Iustin Pop
6301 c1c31426 Iustin Pop
    ei_version = export_info.get(constants.INISECT_EXP, "version")
6302 c1c31426 Iustin Pop
    if (int(ei_version) != constants.EXPORT_VERSION):
6303 c1c31426 Iustin Pop
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
6304 c1c31426 Iustin Pop
                                 (ei_version, constants.EXPORT_VERSION),
6305 c1c31426 Iustin Pop
                                 errors.ECODE_ENVIRON)
6306 c1c31426 Iustin Pop
    return export_info
6307 a8083063 Iustin Pop
6308 f276c4b5 Iustin Pop
  def _ReadExportParams(self, einfo):
6309 f276c4b5 Iustin Pop
    """Use export parameters as defaults.
6310 f276c4b5 Iustin Pop

6311 f276c4b5 Iustin Pop
    In case the opcode doesn't specify (i.e. override) some instance
6312 f276c4b5 Iustin Pop
    parameters, try to use them from the export information, if
6313 f276c4b5 Iustin Pop
    it declares them.
6314 f276c4b5 Iustin Pop

6315 f276c4b5 Iustin Pop
    """
6316 b6cd72b2 Iustin Pop
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
6317 b6cd72b2 Iustin Pop
6318 f276c4b5 Iustin Pop
    if self.op.disk_template is None:
6319 f276c4b5 Iustin Pop
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
6320 f276c4b5 Iustin Pop
        self.op.disk_template = einfo.get(constants.INISECT_INS,
6321 f276c4b5 Iustin Pop
                                          "disk_template")
6322 f276c4b5 Iustin Pop
      else:
6323 f276c4b5 Iustin Pop
        raise errors.OpPrereqError("No disk template specified and the export"
6324 f276c4b5 Iustin Pop
                                   " is missing the disk_template information",
6325 f276c4b5 Iustin Pop
                                   errors.ECODE_INVAL)
6326 f276c4b5 Iustin Pop
6327 9b12ed0f Iustin Pop
    if not self.op.disks:
6328 9b12ed0f Iustin Pop
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
6329 9b12ed0f Iustin Pop
        disks = []
6330 9b12ed0f Iustin Pop
        # TODO: import the disk iv_name too
6331 9b12ed0f Iustin Pop
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
6332 9b12ed0f Iustin Pop
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
6333 9b12ed0f Iustin Pop
          disks.append({"size": disk_sz})
6334 9b12ed0f Iustin Pop
        self.op.disks = disks
6335 9b12ed0f Iustin Pop
      else:
6336 9b12ed0f Iustin Pop
        raise errors.OpPrereqError("No disk info specified and the export"
6337 9b12ed0f Iustin Pop
                                   " is missing the disk information",
6338 9b12ed0f Iustin Pop
                                   errors.ECODE_INVAL)
6339 9b12ed0f Iustin Pop
6340 0af0f641 Iustin Pop
    if (not self.op.nics and
6341 0af0f641 Iustin Pop
        einfo.has_option(constants.INISECT_INS, "nic_count")):
6342 0af0f641 Iustin Pop
      nics = []
6343 0af0f641 Iustin Pop
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
6344 0af0f641 Iustin Pop
        ndict = {}
6345 0af0f641 Iustin Pop
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
6346 0af0f641 Iustin Pop
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
6347 0af0f641 Iustin Pop
          ndict[name] = v
6348 0af0f641 Iustin Pop
        nics.append(ndict)
6349 0af0f641 Iustin Pop
      self.op.nics = nics
6350 0af0f641 Iustin Pop
6351 9f88b0e8 Iustin Pop
    if (self.op.hypervisor is None and
6352 9f88b0e8 Iustin Pop
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
6353 9f88b0e8 Iustin Pop
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
6354 9f88b0e8 Iustin Pop
    if einfo.has_section(constants.INISECT_HYP):
6355 9f88b0e8 Iustin Pop
      # use the export parameters but do not override the ones
6356 9f88b0e8 Iustin Pop
      # specified by the user
6357 9f88b0e8 Iustin Pop
      for name, value in einfo.items(constants.INISECT_HYP):
6358 9f88b0e8 Iustin Pop
        if name not in self.op.hvparams:
6359 9f88b0e8 Iustin Pop
          self.op.hvparams[name] = value
6360 9f88b0e8 Iustin Pop
6361 cc0d88e9 Iustin Pop
    if einfo.has_section(constants.INISECT_BEP):
6362 cc0d88e9 Iustin Pop
      # use the parameters, without overriding
6363 cc0d88e9 Iustin Pop
      for name, value in einfo.items(constants.INISECT_BEP):
6364 cc0d88e9 Iustin Pop
        if name not in self.op.beparams:
6365 cc0d88e9 Iustin Pop
          self.op.beparams[name] = value
6366 cc0d88e9 Iustin Pop
    else:
6367 cc0d88e9 Iustin Pop
      # try to read the parameters old style, from the main section
6368 cc0d88e9 Iustin Pop
      for name in constants.BES_PARAMETERS:
6369 cc0d88e9 Iustin Pop
        if (name not in self.op.beparams and
6370 cc0d88e9 Iustin Pop
            einfo.has_option(constants.INISECT_INS, name)):
6371 cc0d88e9 Iustin Pop
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
6372 cc0d88e9 Iustin Pop
6373 e588764d Iustin Pop
  def _RevertToDefaults(self, cluster):
6374 e588764d Iustin Pop
    """Revert the instance parameters to the default values.
6375 e588764d Iustin Pop

6376 e588764d Iustin Pop
    """
6377 e588764d Iustin Pop
    # hvparams
6378 e588764d Iustin Pop
    hv_defs = cluster.GetHVDefaults(self.op.hypervisor, self.op.os_type)
6379 e588764d Iustin Pop
    for name in self.op.hvparams.keys():
6380 e588764d Iustin Pop
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
6381 e588764d Iustin Pop
        del self.op.hvparams[name]
6382 e588764d Iustin Pop
    # beparams
6383 e588764d Iustin Pop
    be_defs = cluster.beparams.get(constants.PP_DEFAULT, {})
6384 e588764d Iustin Pop
    for name in self.op.beparams.keys():
6385 e588764d Iustin Pop
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
6386 e588764d Iustin Pop
        del self.op.beparams[name]
6387 e588764d Iustin Pop
    # nic params
6388 e588764d Iustin Pop
    nic_defs = cluster.nicparams.get(constants.PP_DEFAULT, {})
6389 e588764d Iustin Pop
    for nic in self.op.nics:
6390 e588764d Iustin Pop
      for name in constants.NICS_PARAMETERS:
6391 e588764d Iustin Pop
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
6392 e588764d Iustin Pop
          del nic[name]
6393 e588764d Iustin Pop
6394 a8083063 Iustin Pop
  def CheckPrereq(self):
6395 a8083063 Iustin Pop
    """Check prerequisites.
6396 a8083063 Iustin Pop

6397 a8083063 Iustin Pop
    """
6398 c1c31426 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6399 c1c31426 Iustin Pop
      export_info = self._ReadExportInfo()
6400 f276c4b5 Iustin Pop
      self._ReadExportParams(export_info)
6401 f276c4b5 Iustin Pop
6402 f276c4b5 Iustin Pop
    _CheckDiskTemplate(self.op.disk_template)
6403 c1c31426 Iustin Pop
6404 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
6405 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
6406 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
6407 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_STATE)
6408 eedc99de Manuel Franceschini
6409 22f50b1d Iustin Pop
    if self.op.hypervisor is None:
6410 22f50b1d Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
6411 22f50b1d Iustin Pop
6412 22f50b1d Iustin Pop
    cluster = self.cfg.GetClusterInfo()
6413 22f50b1d Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
6414 22f50b1d Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
6415 22f50b1d Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
6416 22f50b1d Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
6417 22f50b1d Iustin Pop
                                  ",".join(enabled_hvs)),
6418 22f50b1d Iustin Pop
                                 errors.ECODE_STATE)
6419 22f50b1d Iustin Pop
6420 22f50b1d Iustin Pop
    # check hypervisor parameter syntax (locally)
6421 22f50b1d Iustin Pop
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6422 b6cd72b2 Iustin Pop
    filled_hvp = objects.FillDict(cluster.GetHVDefaults(self.op.hypervisor,
6423 b6cd72b2 Iustin Pop
                                                        self.op.os_type),
6424 22f50b1d Iustin Pop
                                  self.op.hvparams)
6425 22f50b1d Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
6426 22f50b1d Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
6427 22f50b1d Iustin Pop
    self.hv_full = filled_hvp
6428 22f50b1d Iustin Pop
    # check that we don't specify global parameters on an instance
6429 22f50b1d Iustin Pop
    _CheckGlobalHvParams(self.op.hvparams)
6430 22f50b1d Iustin Pop
6431 22f50b1d Iustin Pop
    # fill and remember the beparams dict
6432 22f50b1d Iustin Pop
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6433 22f50b1d Iustin Pop
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
6434 22f50b1d Iustin Pop
                                    self.op.beparams)
6435 22f50b1d Iustin Pop
6436 e588764d Iustin Pop
    # now that hvp/bep are in final format, let's reset to defaults,
6437 e588764d Iustin Pop
    # if told to do so
6438 e588764d Iustin Pop
    if self.op.identify_defaults:
6439 e588764d Iustin Pop
      self._RevertToDefaults(cluster)
6440 e588764d Iustin Pop
6441 22f50b1d Iustin Pop
    # NIC buildup
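    # Hypothetical input examples: {"mode": "bridged", "link": "xen-br0",
    # "mac": "auto"} or, for routed mode, {"mode": "routed",
    # "ip": "198.51.100.10"}; each entry is turned into an objects.NIC
    # with fully validated nicparams below.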
6442 22f50b1d Iustin Pop
    self.nics = []
6443 22f50b1d Iustin Pop
    for idx, nic in enumerate(self.op.nics):
6444 22f50b1d Iustin Pop
      nic_mode_req = nic.get("mode", None)
6445 22f50b1d Iustin Pop
      nic_mode = nic_mode_req
6446 22f50b1d Iustin Pop
      if nic_mode is None:
6447 22f50b1d Iustin Pop
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
6448 22f50b1d Iustin Pop
6449 22f50b1d Iustin Pop
      # in routed mode, for the first nic, the default ip is 'auto'
6450 22f50b1d Iustin Pop
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
6451 22f50b1d Iustin Pop
        default_ip_mode = constants.VALUE_AUTO
6452 22f50b1d Iustin Pop
      else:
6453 22f50b1d Iustin Pop
        default_ip_mode = constants.VALUE_NONE
6454 22f50b1d Iustin Pop
6455 22f50b1d Iustin Pop
      # ip validity checks
6456 22f50b1d Iustin Pop
      ip = nic.get("ip", default_ip_mode)
6457 22f50b1d Iustin Pop
      if ip is None or ip.lower() == constants.VALUE_NONE:
6458 22f50b1d Iustin Pop
        nic_ip = None
6459 22f50b1d Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
6460 22f50b1d Iustin Pop
        if not self.op.name_check:
6461 22f50b1d Iustin Pop
          raise errors.OpPrereqError("IP address set to auto but name checks"
6462 22f50b1d Iustin Pop
                                     " have been skipped. Aborting.",
6463 22f50b1d Iustin Pop
                                     errors.ECODE_INVAL)
6464 22f50b1d Iustin Pop
        nic_ip = self.hostname1.ip
6465 22f50b1d Iustin Pop
      else:
6466 22f50b1d Iustin Pop
        if not utils.IsValidIP(ip):
6467 22f50b1d Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
6468 22f50b1d Iustin Pop
                                     " like a valid IP" % ip,
6469 22f50b1d Iustin Pop
                                     errors.ECODE_INVAL)
6470 22f50b1d Iustin Pop
        nic_ip = ip
6471 22f50b1d Iustin Pop
6472 22f50b1d Iustin Pop
      # TODO: check the ip address for uniqueness
6473 22f50b1d Iustin Pop
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
6474 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
6475 22f50b1d Iustin Pop
                                   errors.ECODE_INVAL)
6476 22f50b1d Iustin Pop
6477 22f50b1d Iustin Pop
      # MAC address verification
6478 22f50b1d Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
6479 22f50b1d Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6480 22f50b1d Iustin Pop
        mac = utils.NormalizeAndValidateMac(mac)
6481 22f50b1d Iustin Pop
6482 22f50b1d Iustin Pop
        try:
6483 22f50b1d Iustin Pop
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
6484 22f50b1d Iustin Pop
        except errors.ReservationError:
6485 22f50b1d Iustin Pop
          raise errors.OpPrereqError("MAC address %s already in use"
6486 22f50b1d Iustin Pop
                                     " in cluster" % mac,
6487 22f50b1d Iustin Pop
                                     errors.ECODE_NOTUNIQUE)
6488 22f50b1d Iustin Pop
6489 22f50b1d Iustin Pop
      # bridge verification
6490 22f50b1d Iustin Pop
      bridge = nic.get("bridge", None)
6491 22f50b1d Iustin Pop
      link = nic.get("link", None)
6492 22f50b1d Iustin Pop
      if bridge and link:
6493 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
6494 22f50b1d Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
6495 22f50b1d Iustin Pop
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
6496 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
6497 22f50b1d Iustin Pop
                                   errors.ECODE_INVAL)
6498 22f50b1d Iustin Pop
      elif bridge:
6499 22f50b1d Iustin Pop
        link = bridge
6500 22f50b1d Iustin Pop
6501 22f50b1d Iustin Pop
      nicparams = {}
6502 22f50b1d Iustin Pop
      if nic_mode_req:
6503 22f50b1d Iustin Pop
        nicparams[constants.NIC_MODE] = nic_mode_req
6504 22f50b1d Iustin Pop
      if link:
6505 22f50b1d Iustin Pop
        nicparams[constants.NIC_LINK] = link
6506 22f50b1d Iustin Pop
6507 22f50b1d Iustin Pop
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
6508 22f50b1d Iustin Pop
                                      nicparams)
6509 22f50b1d Iustin Pop
      objects.NIC.CheckParameterSyntax(check_params)
6510 22f50b1d Iustin Pop
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
6511 22f50b1d Iustin Pop
6512 22f50b1d Iustin Pop
    # disk checks/pre-build
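    # Each element of self.op.disks is expected to be a dict with a mandatory
    # integer "size" (in MiB) and optional "mode" and "adopt" keys, as checked
    # below.  Illustrative example: {"size": 1024, "mode": constants.DISK_RDWR}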
6513 22f50b1d Iustin Pop
    self.disks = []
6514 22f50b1d Iustin Pop
    for disk in self.op.disks:
6515 22f50b1d Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
6516 22f50b1d Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
6517 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
6518 22f50b1d Iustin Pop
                                   mode, errors.ECODE_INVAL)
6519 22f50b1d Iustin Pop
      size = disk.get("size", None)
6520 22f50b1d Iustin Pop
      if size is None:
6521 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
6522 22f50b1d Iustin Pop
      try:
6523 22f50b1d Iustin Pop
        size = int(size)
6524 22f50b1d Iustin Pop
      except (TypeError, ValueError):
6525 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
6526 22f50b1d Iustin Pop
                                   errors.ECODE_INVAL)
6527 22f50b1d Iustin Pop
      new_disk = {"size": size, "mode": mode}
6528 22f50b1d Iustin Pop
      if "adopt" in disk:
6529 22f50b1d Iustin Pop
        new_disk["adopt"] = disk["adopt"]
6530 22f50b1d Iustin Pop
      self.disks.append(new_disk)
6531 22f50b1d Iustin Pop
6532 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6533 a8083063 Iustin Pop
6534 09acf207 Guido Trotter
      # Check that the new instance doesn't have fewer disks than the export
6535 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
6536 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
6537 09acf207 Guido Trotter
      if instance_disks < export_disks:
6538 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
6539 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
6540 5c983ee5 Iustin Pop
                                   (instance_disks, export_disks),
6541 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6542 a8083063 Iustin Pop
6543 09acf207 Guido Trotter
      disk_images = []
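      # the export info may carry one 'disk%d_dump' option per exported disk,
      # naming the dump file under src_path; disks without such an option get
      # False here, meaning there is no image to restore for them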
6544 09acf207 Guido Trotter
      for idx in range(export_disks):
6545 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
6546 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
6547 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
6548 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
6549 c1c31426 Iustin Pop
          image = utils.PathJoin(self.op.src_path, export_name)
6550 09acf207 Guido Trotter
          disk_images.append(image)
6551 09acf207 Guido Trotter
        else:
6552 09acf207 Guido Trotter
          disk_images.append(False)
6553 09acf207 Guido Trotter
6554 09acf207 Guido Trotter
      self.src_images = disk_images
6555 901a65c1 Iustin Pop
6556 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
6557 2a518543 Iustin Pop
      try:
6558 2a518543 Iustin Pop
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
6559 2a518543 Iustin Pop
      except (TypeError, ValueError), err:
6560 2a518543 Iustin Pop
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
6561 2a518543 Iustin Pop
                                   " an integer: %s" % str(err),
6562 2a518543 Iustin Pop
                                   errors.ECODE_STATE)
6563 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
6564 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
6565 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and idx < exp_nic_count:
6566 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
6567 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
6568 bc89efc3 Guido Trotter
6569 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
6570 901a65c1 Iustin Pop
6571 18c8f361 Iustin Pop
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
6572 901a65c1 Iustin Pop
    if self.op.ip_check:
6573 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
6574 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
6575 5c983ee5 Iustin Pop
                                   (self.check_ip, self.op.instance_name),
6576 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
6577 901a65c1 Iustin Pop
6578 295728df Guido Trotter
    #### mac address generation
6579 295728df Guido Trotter
    # By generating here the mac address both the allocator and the hooks get
6580 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
6581 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
6582 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
6583 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
6584 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
6585 295728df Guido Trotter
    # creation job will fail.
6586 295728df Guido Trotter
    for nic in self.nics:
6587 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6588 36b66e6e Guido Trotter
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
6589 295728df Guido Trotter
6590 538475ca Iustin Pop
    #### allocator run
6591 538475ca Iustin Pop
6592 538475ca Iustin Pop
    if self.op.iallocator is not None:
6593 538475ca Iustin Pop
      self._RunAllocator()
6594 0f1a06e3 Manuel Franceschini
6595 901a65c1 Iustin Pop
    #### node related checks
6596 901a65c1 Iustin Pop
6597 901a65c1 Iustin Pop
    # check primary node
6598 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
6599 7baf741d Guido Trotter
    assert self.pnode is not None, \
6600 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
6601 7527a8a4 Iustin Pop
    if pnode.offline:
6602 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
6603 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
6604 733a2b6a Iustin Pop
    if pnode.drained:
6605 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
6606 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
6607 7527a8a4 Iustin Pop
6608 901a65c1 Iustin Pop
    self.secondaries = []
6609 901a65c1 Iustin Pop
6610 901a65c1 Iustin Pop
    # mirror node verification
6611 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
6612 7baf741d Guido Trotter
      if self.op.snode is None:
6613 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
6614 5c983ee5 Iustin Pop
                                   " a mirror node", errors.ECODE_INVAL)
6615 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
6616 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be the"
6617 5c983ee5 Iustin Pop
                                   " primary node.", errors.ECODE_INVAL)
6618 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
6619 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
6620 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
6621 a8083063 Iustin Pop
6622 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
6623 6785674e Iustin Pop
6624 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
6625 08db7c5c Iustin Pop
                                self.disks)
6626 ed1ebc60 Guido Trotter
6627 c3589cf8 Iustin Pop
    # Check lv size requirements, if not adopting
6628 c3589cf8 Iustin Pop
    if req_size is not None and not self.adopt_disks:
6629 701384a9 Iustin Pop
      _CheckNodesFreeDisk(self, nodenames, req_size)
6630 ed1ebc60 Guido Trotter
6631 c3589cf8 Iustin Pop
    if self.adopt_disks: # instead, we must check the adoption data
6632 c3589cf8 Iustin Pop
      all_lvs = set([i["adopt"] for i in self.disks])
6633 c3589cf8 Iustin Pop
      if len(all_lvs) != len(self.disks):
6634 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
6635 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6636 c3589cf8 Iustin Pop
      for lv_name in all_lvs:
6637 c3589cf8 Iustin Pop
        try:
6638 c3589cf8 Iustin Pop
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
6639 c3589cf8 Iustin Pop
        except errors.ReservationError:
6640 c3589cf8 Iustin Pop
          raise errors.OpPrereqError("LV named %s used by another instance" %
6641 c3589cf8 Iustin Pop
                                     lv_name, errors.ECODE_NOTUNIQUE)
6642 c3589cf8 Iustin Pop
6643 c3589cf8 Iustin Pop
      node_lvs = self.rpc.call_lv_list([pnode.name],
6644 c3589cf8 Iustin Pop
                                       self.cfg.GetVGName())[pnode.name]
6645 c3589cf8 Iustin Pop
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
6646 c3589cf8 Iustin Pop
      node_lvs = node_lvs.payload
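      # node_lvs maps each LV name to a tuple in which (as used below) the
      # first element is the LV size and the third its online status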
6647 c3589cf8 Iustin Pop
      delta = all_lvs.difference(node_lvs.keys())
6648 c3589cf8 Iustin Pop
      if delta:
6649 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
6650 c3589cf8 Iustin Pop
                                   utils.CommaJoin(delta),
6651 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6652 c3589cf8 Iustin Pop
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
6653 c3589cf8 Iustin Pop
      if online_lvs:
6654 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Online logical volumes found, cannot"
6655 c3589cf8 Iustin Pop
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
6656 c3589cf8 Iustin Pop
                                   errors.ECODE_STATE)
6657 c3589cf8 Iustin Pop
      # update the sizes of the disks based on what was found on the node
6658 c3589cf8 Iustin Pop
      for dsk in self.disks:
6659 c3589cf8 Iustin Pop
        dsk["size"] = int(float(node_lvs[dsk["adopt"]][0]))
6660 c3589cf8 Iustin Pop
6661 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
6662 6785674e Iustin Pop
6663 231cd901 Iustin Pop
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
6664 a8083063 Iustin Pop
6665 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
6666 a8083063 Iustin Pop
6667 49ce1563 Iustin Pop
    # memory check on primary node
6668 49ce1563 Iustin Pop
    if self.op.start:
6669 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
6670 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
6671 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
6672 338e51e8 Iustin Pop
                           self.op.hypervisor)
6673 49ce1563 Iustin Pop
6674 08896026 Iustin Pop
    self.dry_run_result = list(nodenames)
6675 08896026 Iustin Pop
6676 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6677 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
6678 a8083063 Iustin Pop

6679 a8083063 Iustin Pop
    """
6680 a8083063 Iustin Pop
    instance = self.op.instance_name
6681 a8083063 Iustin Pop
    pnode_name = self.pnode.name
6682 a8083063 Iustin Pop
6683 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
6684 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
6685 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
6686 2a6469d5 Alexander Schreiber
    else:
6687 2a6469d5 Alexander Schreiber
      network_port = None
6688 58acb49d Alexander Schreiber
6689 0e3baaf3 Iustin Pop
    if constants.ENABLE_FILE_STORAGE:
6690 0e3baaf3 Iustin Pop
      # this is needed because utils.PathJoin does not accept None arguments
6691 0e3baaf3 Iustin Pop
      if self.op.file_storage_dir is None:
6692 0e3baaf3 Iustin Pop
        string_file_storage_dir = ""
6693 0e3baaf3 Iustin Pop
      else:
6694 0e3baaf3 Iustin Pop
        string_file_storage_dir = self.op.file_storage_dir
6695 31a853d2 Iustin Pop
6696 0e3baaf3 Iustin Pop
      # build the full file storage dir path
6697 0e3baaf3 Iustin Pop
      file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
6698 0e3baaf3 Iustin Pop
                                        string_file_storage_dir, instance)
6699 2c313123 Manuel Franceschini
    else:
6700 0e3baaf3 Iustin Pop
      file_storage_dir = ""
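    # illustrative example (hypothetical values): with a cluster file storage
    # dir of "/srv/ganeti/file-storage" and file_storage_dir "mydir", the
    # path above becomes "/srv/ganeti/file-storage/mydir/<instance name>"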
6701 0f1a06e3 Manuel Franceschini
6702 0f1a06e3 Manuel Franceschini
6703 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
6704 a8083063 Iustin Pop
                                  self.op.disk_template,
6705 a8083063 Iustin Pop
                                  instance, pnode_name,
6706 08db7c5c Iustin Pop
                                  self.secondaries,
6707 08db7c5c Iustin Pop
                                  self.disks,
6708 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
6709 e2a65344 Iustin Pop
                                  self.op.file_driver,
6710 e2a65344 Iustin Pop
                                  0)
6711 a8083063 Iustin Pop
6712 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
6713 a8083063 Iustin Pop
                            primary_node=pnode_name,
6714 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
6715 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
6716 4978db17 Iustin Pop
                            admin_up=False,
6717 58acb49d Alexander Schreiber
                            network_port=network_port,
6718 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
6719 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
6720 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
6721 a8083063 Iustin Pop
                            )
6722 a8083063 Iustin Pop
6723 c3589cf8 Iustin Pop
    if self.adopt_disks:
6724 c3589cf8 Iustin Pop
      # rename the adopted LVs to the newly-generated names; for this we build
6725 c3589cf8 Iustin Pop
      # 'fake' disk objects carrying the old (adopted) LV names as unique_id
6726 c3589cf8 Iustin Pop
      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
6727 c3589cf8 Iustin Pop
      rename_to = []
6728 c3589cf8 Iustin Pop
      for t_dsk, a_dsk in zip(tmp_disks, self.disks):
6729 c3589cf8 Iustin Pop
        rename_to.append(t_dsk.logical_id)
6730 c3589cf8 Iustin Pop
        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
6731 c3589cf8 Iustin Pop
        self.cfg.SetDiskID(t_dsk, pnode_name)
6732 c3589cf8 Iustin Pop
      result = self.rpc.call_blockdev_rename(pnode_name,
6733 c3589cf8 Iustin Pop
                                             zip(tmp_disks, rename_to))
6734 c3589cf8 Iustin Pop
      result.Raise("Failed to rename adopted LVs")
6735 c3589cf8 Iustin Pop
    else:
6736 c3589cf8 Iustin Pop
      feedback_fn("* creating instance disks...")
6737 796cab27 Iustin Pop
      try:
6738 c3589cf8 Iustin Pop
        _CreateDisks(self, iobj)
6739 c3589cf8 Iustin Pop
      except errors.OpExecError:
6740 c3589cf8 Iustin Pop
        self.LogWarning("Device creation failed, reverting...")
6741 c3589cf8 Iustin Pop
        try:
6742 c3589cf8 Iustin Pop
          _RemoveDisks(self, iobj)
6743 c3589cf8 Iustin Pop
        finally:
6744 c3589cf8 Iustin Pop
          self.cfg.ReleaseDRBDMinors(instance)
6745 c3589cf8 Iustin Pop
          raise
6746 a8083063 Iustin Pop
6747 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
6748 a8083063 Iustin Pop
6749 0debfb35 Guido Trotter
    self.cfg.AddInstance(iobj, self.proc.GetECId())
6750 0debfb35 Guido Trotter
6751 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
6752 7baf741d Guido Trotter
    # added the instance to the config
6753 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
6754 e36e96b4 Guido Trotter
    # Unlock all the nodes
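    # (for imports the source node lock is kept, since the disk images are
    # still copied from it further down in this function)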
6755 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
6756 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
6757 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
6758 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
6759 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
6760 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
6761 9c8971d7 Guido Trotter
    else:
6762 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
6763 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
6764 a8083063 Iustin Pop
6765 a8083063 Iustin Pop
    if self.op.wait_for_sync:
6766 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
6767 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
6768 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
6769 a8083063 Iustin Pop
      time.sleep(15)
6770 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
6771 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
6772 a8083063 Iustin Pop
    else:
6773 a8083063 Iustin Pop
      disk_abort = False
6774 a8083063 Iustin Pop
6775 a8083063 Iustin Pop
    if disk_abort:
6776 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
6777 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
6778 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
6779 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
6780 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
6781 3ecf6786 Iustin Pop
                               " this instance")
6782 a8083063 Iustin Pop
6783 c3589cf8 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
6784 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
6785 25a8792c Iustin Pop
        if not self.op.no_install:
6786 25a8792c Iustin Pop
          feedback_fn("* running the instance OS create scripts...")
6787 25a8792c Iustin Pop
          # FIXME: pass debug option from opcode to backend
6788 25a8792c Iustin Pop
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
6789 25a8792c Iustin Pop
                                                 self.op.debug_level)
6790 25a8792c Iustin Pop
          result.Raise("Could not add os for instance %s"
6791 25a8792c Iustin Pop
                       " on node %s" % (instance, pnode_name))
6792 a8083063 Iustin Pop
6793 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
6794 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
6795 a8083063 Iustin Pop
        src_node = self.op.src_node
6796 09acf207 Guido Trotter
        src_images = self.src_images
6797 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
6798 4a0e011f Iustin Pop
        # FIXME: pass debug option from opcode to backend
6799 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
6800 09acf207 Guido Trotter
                                                         src_node, src_images,
6801 dd713605 Iustin Pop
                                                         cluster_name,
6802 dd713605 Iustin Pop
                                                         self.op.debug_level)
6803 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
6804 944bf548 Iustin Pop
        if msg:
6805 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
6806 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
6807 a8083063 Iustin Pop
      else:
6808 a8083063 Iustin Pop
        # also checked in the prereq part
6809 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
6810 3ecf6786 Iustin Pop
                                     % self.op.mode)
6811 a8083063 Iustin Pop
6812 a8083063 Iustin Pop
    if self.op.start:
6813 4978db17 Iustin Pop
      iobj.admin_up = True
6814 a4eae71f Michael Hanselmann
      self.cfg.Update(iobj, feedback_fn)
6815 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
6816 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
6817 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
6818 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
6819 a8083063 Iustin Pop
6820 08896026 Iustin Pop
    return list(iobj.all_nodes)
6821 08896026 Iustin Pop
6822 a8083063 Iustin Pop
6823 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
6824 a8083063 Iustin Pop
  """Connect to an instance's console.
6825 a8083063 Iustin Pop

6826 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
6827 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
6828 a8083063 Iustin Pop
  console.
6829 a8083063 Iustin Pop

6830 a8083063 Iustin Pop
  """
6831 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
6832 8659b73e Guido Trotter
  REQ_BGL = False
6833 8659b73e Guido Trotter
6834 8659b73e Guido Trotter
  def ExpandNames(self):
6835 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
6836 a8083063 Iustin Pop
6837 a8083063 Iustin Pop
  def CheckPrereq(self):
6838 a8083063 Iustin Pop
    """Check prerequisites.
6839 a8083063 Iustin Pop

6840 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
6841 a8083063 Iustin Pop

6842 a8083063 Iustin Pop
    """
6843 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6844 8659b73e Guido Trotter
    assert self.instance is not None, \
6845 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6846 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
6847 a8083063 Iustin Pop
6848 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6849 a8083063 Iustin Pop
    """Connect to the console of an instance
6850 a8083063 Iustin Pop

6851 a8083063 Iustin Pop
    """
6852 a8083063 Iustin Pop
    instance = self.instance
6853 a8083063 Iustin Pop
    node = instance.primary_node
6854 a8083063 Iustin Pop
6855 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
6856 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
6857 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
6858 a8083063 Iustin Pop
6859 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
6860 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
6861 a8083063 Iustin Pop
6862 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
6863 a8083063 Iustin Pop
6864 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
6865 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
6866 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
6867 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
6868 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
6869 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
6870 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
6871 b047857b Michael Hanselmann
6872 82122173 Iustin Pop
    # build ssh cmdline
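    # (the returned value is the command that the caller has to run on the
    # master node to attach to the console, as this LU's docstring describes)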
6873 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
6874 a8083063 Iustin Pop
6875 a8083063 Iustin Pop
6876 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
6877 a8083063 Iustin Pop
  """Replace the disks of an instance.
6878 a8083063 Iustin Pop

6879 a8083063 Iustin Pop
  """
6880 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
6881 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6882 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
6883 efd990e4 Guido Trotter
  REQ_BGL = False
6884 efd990e4 Guido Trotter
6885 7e9366f7 Iustin Pop
  def CheckArguments(self):
6886 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
6887 efd990e4 Guido Trotter
      self.op.remote_node = None
6888 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
6889 7e9366f7 Iustin Pop
      self.op.iallocator = None
6890 7ea7bcf6 Iustin Pop
    if not hasattr(self.op, "early_release"):
6891 7ea7bcf6 Iustin Pop
      self.op.early_release = False
6892 7e9366f7 Iustin Pop
6893 c68174b6 Michael Hanselmann
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
6894 c68174b6 Michael Hanselmann
                                  self.op.iallocator)
6895 7e9366f7 Iustin Pop
6896 7e9366f7 Iustin Pop
  def ExpandNames(self):
6897 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
6898 7e9366f7 Iustin Pop
6899 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
6900 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6901 2bb5c911 Michael Hanselmann
6902 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
6903 cf26a87a Iustin Pop
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6904 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
6905 2bb5c911 Michael Hanselmann
6906 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
6907 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
6908 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
6909 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
6910 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6911 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6912 2bb5c911 Michael Hanselmann
6913 efd990e4 Guido Trotter
    else:
6914 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
6915 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6916 efd990e4 Guido Trotter
6917 c68174b6 Michael Hanselmann
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
6918 c68174b6 Michael Hanselmann
                                   self.op.iallocator, self.op.remote_node,
6919 7ea7bcf6 Iustin Pop
                                   self.op.disks, False, self.op.early_release)
6920 c68174b6 Michael Hanselmann
6921 3a012b41 Michael Hanselmann
    self.tasklets = [self.replacer]
6922 2bb5c911 Michael Hanselmann
6923 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
6924 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
6925 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
6926 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
6927 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6928 efd990e4 Guido Trotter
      self._LockInstancesNodes()
6929 a8083063 Iustin Pop
6930 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6931 a8083063 Iustin Pop
    """Build hooks env.
6932 a8083063 Iustin Pop

6933 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
6934 a8083063 Iustin Pop

6935 a8083063 Iustin Pop
    """
6936 2bb5c911 Michael Hanselmann
    instance = self.replacer.instance
6937 a8083063 Iustin Pop
    env = {
6938 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
6939 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
6940 2bb5c911 Michael Hanselmann
      "OLD_SECONDARY": instance.secondary_nodes[0],
6941 a8083063 Iustin Pop
      }
6942 2bb5c911 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self, instance))
6943 0834c866 Iustin Pop
    nl = [
6944 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
6945 2bb5c911 Michael Hanselmann
      instance.primary_node,
6946 0834c866 Iustin Pop
      ]
6947 0834c866 Iustin Pop
    if self.op.remote_node is not None:
6948 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
6949 a8083063 Iustin Pop
    return env, nl, nl
6950 a8083063 Iustin Pop
6951 2bb5c911 Michael Hanselmann
6952 7ffc5a86 Michael Hanselmann
class LUEvacuateNode(LogicalUnit):
6953 7ffc5a86 Michael Hanselmann
  """Relocate the secondary instances from a node.
6954 7ffc5a86 Michael Hanselmann

6955 7ffc5a86 Michael Hanselmann
  """
6956 7ffc5a86 Michael Hanselmann
  HPATH = "node-evacuate"
6957 7ffc5a86 Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
6958 7ffc5a86 Michael Hanselmann
  _OP_REQP = ["node_name"]
6959 7ffc5a86 Michael Hanselmann
  REQ_BGL = False
6960 7ffc5a86 Michael Hanselmann
6961 7ffc5a86 Michael Hanselmann
  def CheckArguments(self):
6962 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "remote_node"):
6963 7ffc5a86 Michael Hanselmann
      self.op.remote_node = None
6964 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "iallocator"):
6965 7ffc5a86 Michael Hanselmann
      self.op.iallocator = None
6966 7ea7bcf6 Iustin Pop
    if not hasattr(self.op, "early_release"):
6967 7ea7bcf6 Iustin Pop
      self.op.early_release = False
6968 7ffc5a86 Michael Hanselmann
6969 7ffc5a86 Michael Hanselmann
    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
6970 7ffc5a86 Michael Hanselmann
                                  self.op.remote_node,
6971 7ffc5a86 Michael Hanselmann
                                  self.op.iallocator)
6972 7ffc5a86 Michael Hanselmann
6973 7ffc5a86 Michael Hanselmann
  def ExpandNames(self):
6974 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6975 7ffc5a86 Michael Hanselmann
6976 7ffc5a86 Michael Hanselmann
    self.needed_locks = {}
6977 7ffc5a86 Michael Hanselmann
6978 7ffc5a86 Michael Hanselmann
    # Declare node locks
6979 7ffc5a86 Michael Hanselmann
    if self.op.iallocator is not None:
6980 7ffc5a86 Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6981 7ffc5a86 Michael Hanselmann
6982 7ffc5a86 Michael Hanselmann
    elif self.op.remote_node is not None:
6983 cf26a87a Iustin Pop
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6984 7ffc5a86 Michael Hanselmann
6985 7ffc5a86 Michael Hanselmann
      # Warning: do not remove the locking of the new secondary here
6986 7ffc5a86 Michael Hanselmann
      # unless DRBD8.AddChildren is changed to work in parallel;
6987 7ffc5a86 Michael Hanselmann
      # currently it doesn't since parallel invocations of
6988 7ffc5a86 Michael Hanselmann
      # FindUnusedMinor will conflict
6989 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
6990 7ffc5a86 Michael Hanselmann
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6991 7ffc5a86 Michael Hanselmann
6992 7ffc5a86 Michael Hanselmann
    else:
6993 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid parameters", errors.ECODE_INVAL)
6994 7ffc5a86 Michael Hanselmann
6995 7ffc5a86 Michael Hanselmann
    # Create tasklets for replacing disks for all secondary instances on this
6996 7ffc5a86 Michael Hanselmann
    # node
6997 7ffc5a86 Michael Hanselmann
    names = []
6998 3a012b41 Michael Hanselmann
    tasklets = []
6999 7ffc5a86 Michael Hanselmann
7000 7ffc5a86 Michael Hanselmann
    for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
7001 7ffc5a86 Michael Hanselmann
      logging.debug("Replacing disks for instance %s", inst.name)
7002 7ffc5a86 Michael Hanselmann
      names.append(inst.name)
7003 7ffc5a86 Michael Hanselmann
7004 7ffc5a86 Michael Hanselmann
      replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
7005 94a1b377 Michael Hanselmann
                                self.op.iallocator, self.op.remote_node, [],
7006 7ea7bcf6 Iustin Pop
                                True, self.op.early_release)
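      # the True argument above is delay_iallocator: the allocator should only
      # run at Exec time, once changes made by earlier tasklets are visible
      # (see TLReplaceDisks._CheckPrereq2)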
7007 3a012b41 Michael Hanselmann
      tasklets.append(replacer)
7008 7ffc5a86 Michael Hanselmann
7009 3a012b41 Michael Hanselmann
    self.tasklets = tasklets
7010 7ffc5a86 Michael Hanselmann
    self.instance_names = names
7011 7ffc5a86 Michael Hanselmann
7012 7ffc5a86 Michael Hanselmann
    # Declare instance locks
7013 7ffc5a86 Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names
7014 7ffc5a86 Michael Hanselmann
7015 7ffc5a86 Michael Hanselmann
  def DeclareLocks(self, level):
7016 7ffc5a86 Michael Hanselmann
    # If we're not already locking all nodes in the set we have to declare the
7017 7ffc5a86 Michael Hanselmann
    # instance's primary/secondary nodes.
7018 7ffc5a86 Michael Hanselmann
    if (level == locking.LEVEL_NODE and
7019 7ffc5a86 Michael Hanselmann
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
7020 7ffc5a86 Michael Hanselmann
      self._LockInstancesNodes()
7021 7ffc5a86 Michael Hanselmann
7022 7ffc5a86 Michael Hanselmann
  def BuildHooksEnv(self):
7023 7ffc5a86 Michael Hanselmann
    """Build hooks env.
7024 7ffc5a86 Michael Hanselmann

7025 7ffc5a86 Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
7026 7ffc5a86 Michael Hanselmann

7027 7ffc5a86 Michael Hanselmann
    """
7028 7ffc5a86 Michael Hanselmann
    env = {
7029 7ffc5a86 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
7030 7ffc5a86 Michael Hanselmann
      }
7031 7ffc5a86 Michael Hanselmann
7032 7ffc5a86 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
7033 7ffc5a86 Michael Hanselmann
7034 7ffc5a86 Michael Hanselmann
    if self.op.remote_node is not None:
7035 7ffc5a86 Michael Hanselmann
      env["NEW_SECONDARY"] = self.op.remote_node
7036 7ffc5a86 Michael Hanselmann
      nl.append(self.op.remote_node)
7037 7ffc5a86 Michael Hanselmann
7038 7ffc5a86 Michael Hanselmann
    return (env, nl, nl)
7039 7ffc5a86 Michael Hanselmann
7040 7ffc5a86 Michael Hanselmann
7041 c68174b6 Michael Hanselmann
class TLReplaceDisks(Tasklet):
7042 2bb5c911 Michael Hanselmann
  """Replaces disks for an instance.
7043 2bb5c911 Michael Hanselmann

7044 2bb5c911 Michael Hanselmann
  Note: Locking is not within the scope of this class.
7045 2bb5c911 Michael Hanselmann

7046 2bb5c911 Michael Hanselmann
  """
7047 2bb5c911 Michael Hanselmann
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
7048 7ea7bcf6 Iustin Pop
               disks, delay_iallocator, early_release):
7049 2bb5c911 Michael Hanselmann
    """Initializes this class.
7050 2bb5c911 Michael Hanselmann

7051 2bb5c911 Michael Hanselmann
    """
7052 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
7053 464243a7 Michael Hanselmann
7054 2bb5c911 Michael Hanselmann
    # Parameters
7055 2bb5c911 Michael Hanselmann
    self.instance_name = instance_name
7056 2bb5c911 Michael Hanselmann
    self.mode = mode
7057 2bb5c911 Michael Hanselmann
    self.iallocator_name = iallocator_name
7058 2bb5c911 Michael Hanselmann
    self.remote_node = remote_node
7059 2bb5c911 Michael Hanselmann
    self.disks = disks
7060 94a1b377 Michael Hanselmann
    self.delay_iallocator = delay_iallocator
7061 7ea7bcf6 Iustin Pop
    self.early_release = early_release
7062 2bb5c911 Michael Hanselmann
7063 2bb5c911 Michael Hanselmann
    # Runtime data
7064 2bb5c911 Michael Hanselmann
    self.instance = None
7065 2bb5c911 Michael Hanselmann
    self.new_node = None
7066 2bb5c911 Michael Hanselmann
    self.target_node = None
7067 2bb5c911 Michael Hanselmann
    self.other_node = None
7068 2bb5c911 Michael Hanselmann
    self.remote_node_info = None
7069 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = None
7070 2bb5c911 Michael Hanselmann
7071 2bb5c911 Michael Hanselmann
  @staticmethod
7072 2bb5c911 Michael Hanselmann
  def CheckArguments(mode, remote_node, iallocator):
7073 c68174b6 Michael Hanselmann
    """Helper function for users of this class.
7074 c68174b6 Michael Hanselmann

7075 c68174b6 Michael Hanselmann
    """
7076 2bb5c911 Michael Hanselmann
    # check for valid parameter combination
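    # (REPLACE_DISK_CHG needs exactly one of remote_node/iallocator; the
    # other modes accept neither, as enforced below)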
7077 2bb5c911 Michael Hanselmann
    if mode == constants.REPLACE_DISK_CHG:
7078 02a00186 Michael Hanselmann
      if remote_node is None and iallocator is None:
7079 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("When changing the secondary either an"
7080 2bb5c911 Michael Hanselmann
                                   " iallocator script must be used or the"
7081 5c983ee5 Iustin Pop
                                   " new node given", errors.ECODE_INVAL)
7082 02a00186 Michael Hanselmann
7083 02a00186 Michael Hanselmann
      if remote_node is not None and iallocator is not None:
7084 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("Give either the iallocator or the new"
7085 5c983ee5 Iustin Pop
                                   " secondary, not both", errors.ECODE_INVAL)
7086 02a00186 Michael Hanselmann
7087 02a00186 Michael Hanselmann
    elif remote_node is not None or iallocator is not None:
7088 02a00186 Michael Hanselmann
      # Not replacing the secondary
7089 02a00186 Michael Hanselmann
      raise errors.OpPrereqError("The iallocator and new node options can"
7090 02a00186 Michael Hanselmann
                                 " only be used when changing the"
7091 5c983ee5 Iustin Pop
                                 " secondary node", errors.ECODE_INVAL)
7092 2bb5c911 Michael Hanselmann
7093 2bb5c911 Michael Hanselmann
  @staticmethod
7094 2bb5c911 Michael Hanselmann
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
7095 2bb5c911 Michael Hanselmann
    """Compute a new secondary node using an IAllocator.
7096 2bb5c911 Michael Hanselmann

7097 2bb5c911 Michael Hanselmann
    """
7098 2bb5c911 Michael Hanselmann
    ial = IAllocator(lu.cfg, lu.rpc,
7099 2bb5c911 Michael Hanselmann
                     mode=constants.IALLOCATOR_MODE_RELOC,
7100 2bb5c911 Michael Hanselmann
                     name=instance_name,
7101 2bb5c911 Michael Hanselmann
                     relocate_from=relocate_from)
7102 2bb5c911 Michael Hanselmann
7103 2bb5c911 Michael Hanselmann
    ial.Run(iallocator_name)
7104 2bb5c911 Michael Hanselmann
7105 2bb5c911 Michael Hanselmann
    if not ial.success:
7106 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
7107 5c983ee5 Iustin Pop
                                 " %s" % (iallocator_name, ial.info),
7108 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
7109 2bb5c911 Michael Hanselmann
7110 680f0a89 Iustin Pop
    if len(ial.result) != ial.required_nodes:
7111 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7112 2bb5c911 Michael Hanselmann
                                 " of nodes (%s), required %s" %
7113 d984846d Iustin Pop
                                 (iallocator_name,
7114 680f0a89 Iustin Pop
                                  len(ial.result), ial.required_nodes),
7115 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
7116 2bb5c911 Michael Hanselmann
7117 680f0a89 Iustin Pop
    remote_node_name = ial.result[0]
7118 2bb5c911 Michael Hanselmann
7119 2bb5c911 Michael Hanselmann
    lu.LogInfo("Selected new secondary for instance '%s': %s",
7120 2bb5c911 Michael Hanselmann
               instance_name, remote_node_name)
7121 2bb5c911 Michael Hanselmann
7122 2bb5c911 Michael Hanselmann
    return remote_node_name
7123 2bb5c911 Michael Hanselmann
7124 942be002 Michael Hanselmann
  def _FindFaultyDisks(self, node_name):
7125 2d9005d8 Michael Hanselmann
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
7126 2d9005d8 Michael Hanselmann
                                    node_name, True)
7127 942be002 Michael Hanselmann
7128 2bb5c911 Michael Hanselmann
  def CheckPrereq(self):
7129 2bb5c911 Michael Hanselmann
    """Check prerequisites.
7130 2bb5c911 Michael Hanselmann

7131 2bb5c911 Michael Hanselmann
    This checks that the instance is in the cluster.
7132 2bb5c911 Michael Hanselmann

7133 2bb5c911 Michael Hanselmann
    """
7134 e9022531 Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
7135 e9022531 Iustin Pop
    assert instance is not None, \
7136 20eca47d Iustin Pop
      "Cannot retrieve locked instance %s" % self.instance_name
7137 2bb5c911 Michael Hanselmann
7138 e9022531 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
7139 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
7140 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_INVAL)
7141 a8083063 Iustin Pop
7142 e9022531 Iustin Pop
    if len(instance.secondary_nodes) != 1:
7143 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
7144 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
7145 5c983ee5 Iustin Pop
                                 len(instance.secondary_nodes),
7146 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
7147 a8083063 Iustin Pop
7148 94a1b377 Michael Hanselmann
    if not self.delay_iallocator:
7149 94a1b377 Michael Hanselmann
      self._CheckPrereq2()
7150 94a1b377 Michael Hanselmann
7151 94a1b377 Michael Hanselmann
  def _CheckPrereq2(self):
7152 94a1b377 Michael Hanselmann
    """Check prerequisites, second part.
7153 94a1b377 Michael Hanselmann

7154 94a1b377 Michael Hanselmann
    This function should always be part of CheckPrereq. It was separated and is
7155 94a1b377 Michael Hanselmann
    now called from Exec because during node evacuation iallocator was only
7156 94a1b377 Michael Hanselmann
    called with an unmodified cluster model, not taking planned changes into
7157 94a1b377 Michael Hanselmann
    account.
7158 94a1b377 Michael Hanselmann

7159 94a1b377 Michael Hanselmann
    """
7160 94a1b377 Michael Hanselmann
    instance = self.instance
7161 e9022531 Iustin Pop
    secondary_node = instance.secondary_nodes[0]
7162 a9e0c397 Iustin Pop
7163 2bb5c911 Michael Hanselmann
    if self.iallocator_name is None:
7164 2bb5c911 Michael Hanselmann
      remote_node = self.remote_node
7165 2bb5c911 Michael Hanselmann
    else:
7166 2bb5c911 Michael Hanselmann
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
7167 e9022531 Iustin Pop
                                       instance.name, instance.secondary_nodes)
7168 b6e82a65 Iustin Pop
7169 a9e0c397 Iustin Pop
    if remote_node is not None:
7170 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
7171 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
7172 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
7173 a9e0c397 Iustin Pop
    else:
7174 a9e0c397 Iustin Pop
      self.remote_node_info = None
7175 2bb5c911 Michael Hanselmann
7176 2bb5c911 Michael Hanselmann
    if remote_node == self.instance.primary_node:
7177 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
7178 5c983ee5 Iustin Pop
                                 " the instance.", errors.ECODE_INVAL)
7179 2bb5c911 Michael Hanselmann
7180 2bb5c911 Michael Hanselmann
    if remote_node == secondary_node:
7181 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
7182 5c983ee5 Iustin Pop
                                 " secondary node of the instance.",
7183 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7184 7e9366f7 Iustin Pop
7185 2945fd2d Michael Hanselmann
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
7186 2945fd2d Michael Hanselmann
                                    constants.REPLACE_DISK_CHG):
7187 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
7188 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7189 942be002 Michael Hanselmann
7190 2945fd2d Michael Hanselmann
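    # In every branch below, target_node is the node whose disks are being
    # replaced (the old secondary for REPLACE_DISK_CHG), other_node is the
    # peer left untouched and new_node (REPLACE_DISK_CHG only) is the new
    # secondary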
    if self.mode == constants.REPLACE_DISK_AUTO:
7191 e9022531 Iustin Pop
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
7192 942be002 Michael Hanselmann
      faulty_secondary = self._FindFaultyDisks(secondary_node)
7193 942be002 Michael Hanselmann
7194 942be002 Michael Hanselmann
      if faulty_primary and faulty_secondary:
7195 942be002 Michael Hanselmann
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
7196 942be002 Michael Hanselmann
                                   " one node and can not be repaired"
7197 5c983ee5 Iustin Pop
                                   " automatically" % self.instance_name,
7198 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
7199 942be002 Michael Hanselmann
7200 942be002 Michael Hanselmann
      if faulty_primary:
7201 942be002 Michael Hanselmann
        self.disks = faulty_primary
7202 e9022531 Iustin Pop
        self.target_node = instance.primary_node
7203 942be002 Michael Hanselmann
        self.other_node = secondary_node
7204 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7205 942be002 Michael Hanselmann
      elif faulty_secondary:
7206 942be002 Michael Hanselmann
        self.disks = faulty_secondary
7207 942be002 Michael Hanselmann
        self.target_node = secondary_node
7208 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7209 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7210 942be002 Michael Hanselmann
      else:
7211 942be002 Michael Hanselmann
        self.disks = []
7212 942be002 Michael Hanselmann
        check_nodes = []
7213 942be002 Michael Hanselmann
7214 942be002 Michael Hanselmann
    else:
7215 942be002 Michael Hanselmann
      # Non-automatic modes
7216 942be002 Michael Hanselmann
      if self.mode == constants.REPLACE_DISK_PRI:
7217 e9022531 Iustin Pop
        self.target_node = instance.primary_node
7218 942be002 Michael Hanselmann
        self.other_node = secondary_node
7219 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7220 7e9366f7 Iustin Pop
7221 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_SEC:
7222 942be002 Michael Hanselmann
        self.target_node = secondary_node
7223 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7224 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7225 a9e0c397 Iustin Pop
7226 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_CHG:
7227 942be002 Michael Hanselmann
        self.new_node = remote_node
7228 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7229 942be002 Michael Hanselmann
        self.target_node = secondary_node
7230 942be002 Michael Hanselmann
        check_nodes = [self.new_node, self.other_node]
7231 54155f52 Iustin Pop
7232 942be002 Michael Hanselmann
        _CheckNodeNotDrained(self.lu, remote_node)
7233 a8083063 Iustin Pop
7234 9af0fa6a Iustin Pop
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
7235 9af0fa6a Iustin Pop
        assert old_node_info is not None
7236 9af0fa6a Iustin Pop
        if old_node_info.offline and not self.early_release:
7237 9af0fa6a Iustin Pop
          # doesn't make sense to delay the release
7238 9af0fa6a Iustin Pop
          self.early_release = True
7239 9af0fa6a Iustin Pop
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
7240 9af0fa6a Iustin Pop
                          " early-release mode", secondary_node)
7241 9af0fa6a Iustin Pop
7242 942be002 Michael Hanselmann
      else:
7243 942be002 Michael Hanselmann
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
7244 942be002 Michael Hanselmann
                                     self.mode)
7245 942be002 Michael Hanselmann
7246 942be002 Michael Hanselmann
      # If not specified all disks should be replaced
7247 942be002 Michael Hanselmann
      if not self.disks:
7248 942be002 Michael Hanselmann
        self.disks = range(len(self.instance.disks))
7249 a9e0c397 Iustin Pop
7250 2bb5c911 Michael Hanselmann
    for node in check_nodes:
7251 2bb5c911 Michael Hanselmann
      _CheckNodeOnline(self.lu, node)
7252 e4376078 Iustin Pop
7253 2bb5c911 Michael Hanselmann
    # Check whether disks are valid
7254 2bb5c911 Michael Hanselmann
    for disk_idx in self.disks:
7255 e9022531 Iustin Pop
      instance.FindDisk(disk_idx)
7256 e4376078 Iustin Pop
7257 2bb5c911 Michael Hanselmann
    # Get secondary node IP addresses
7258 2bb5c911 Michael Hanselmann
    node_2nd_ip = {}
7259 e4376078 Iustin Pop
7260 2bb5c911 Michael Hanselmann
    for node_name in [self.target_node, self.other_node, self.new_node]:
7261 2bb5c911 Michael Hanselmann
      if node_name is not None:
7262 2bb5c911 Michael Hanselmann
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
7263 e4376078 Iustin Pop
7264 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = node_2nd_ip
7265 a9e0c397 Iustin Pop
7266 c68174b6 Michael Hanselmann
  def Exec(self, feedback_fn):
7267 2bb5c911 Michael Hanselmann
    """Execute disk replacement.
7268 2bb5c911 Michael Hanselmann

7269 2bb5c911 Michael Hanselmann
    This dispatches the disk replacement to the appropriate handler.
7270 cff90b79 Iustin Pop

7271 a9e0c397 Iustin Pop
    """
7272 94a1b377 Michael Hanselmann
    if self.delay_iallocator:
7273 94a1b377 Michael Hanselmann
      self._CheckPrereq2()
7274 94a1b377 Michael Hanselmann
7275 942be002 Michael Hanselmann
    if not self.disks:
7276 942be002 Michael Hanselmann
      feedback_fn("No disks need replacement")
7277 942be002 Michael Hanselmann
      return
7278 942be002 Michael Hanselmann
7279 942be002 Michael Hanselmann
    feedback_fn("Replacing disk(s) %s for %s" %
7280 1f864b60 Iustin Pop
                (utils.CommaJoin(self.disks), self.instance.name))
7281 7ffc5a86 Michael Hanselmann
7282 2bb5c911 Michael Hanselmann
    activate_disks = (not self.instance.admin_up)
7283 2bb5c911 Michael Hanselmann
7284 2bb5c911 Michael Hanselmann
    # Activate the instance disks if we're replacing them on a down instance
7285 2bb5c911 Michael Hanselmann
    if activate_disks:
7286 2bb5c911 Michael Hanselmann
      _StartInstanceDisks(self.lu, self.instance, True)
7287 2bb5c911 Michael Hanselmann
7288 2bb5c911 Michael Hanselmann
    try:
7289 942be002 Michael Hanselmann
      # Should we replace the secondary node?
7290 942be002 Michael Hanselmann
      if self.new_node is not None:
7291 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8Secondary
7292 2bb5c911 Michael Hanselmann
      else:
7293 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8DiskOnly
7294 a4eae71f Michael Hanselmann
7295 a4eae71f Michael Hanselmann
      return fn(feedback_fn)
7296 2bb5c911 Michael Hanselmann
7297 2bb5c911 Michael Hanselmann
    finally:
7298 5c983ee5 Iustin Pop
      # Deactivate the instance disks if we're replacing them on a
7299 5c983ee5 Iustin Pop
      # down instance
7300 2bb5c911 Michael Hanselmann
      if activate_disks:
7301 2bb5c911 Michael Hanselmann
        _SafeShutdownInstanceDisks(self.lu, self.instance)
7302 2bb5c911 Michael Hanselmann
7303 2bb5c911 Michael Hanselmann
  def _CheckVolumeGroup(self, nodes):
7304 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Checking volume groups")
7305 2bb5c911 Michael Hanselmann
7306 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
7307 cff90b79 Iustin Pop
7308 2bb5c911 Michael Hanselmann
    # Make sure volume group exists on all involved nodes
7309 2bb5c911 Michael Hanselmann
    results = self.rpc.call_vg_list(nodes)
7310 cff90b79 Iustin Pop
    if not results:
7311 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
7312 2bb5c911 Michael Hanselmann
7313 2bb5c911 Michael Hanselmann
    for node in nodes:
7314 781de953 Iustin Pop
      res = results[node]
7315 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
7316 2bb5c911 Michael Hanselmann
      if vgname not in res.payload:
7317 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
7318 2bb5c911 Michael Hanselmann
                                 (vgname, node))
7319 2bb5c911 Michael Hanselmann
7320 2bb5c911 Michael Hanselmann
  def _CheckDisksExistence(self, nodes):
7321 2bb5c911 Michael Hanselmann
    # Check disk existence
7322 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7323 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7324 cff90b79 Iustin Pop
        continue
7325 2bb5c911 Michael Hanselmann
7326 2bb5c911 Michael Hanselmann
      for node in nodes:
7327 2bb5c911 Michael Hanselmann
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
7328 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(dev, node)
7329 2bb5c911 Michael Hanselmann
7330 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
7331 2bb5c911 Michael Hanselmann
7332 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7333 2bb5c911 Michael Hanselmann
        if msg or not result.payload:
7334 2bb5c911 Michael Hanselmann
          if not msg:
7335 2bb5c911 Michael Hanselmann
            msg = "disk not found"
7336 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
7337 23829f6f Iustin Pop
                                   (idx, node, msg))
7338 cff90b79 Iustin Pop
7339 2bb5c911 Michael Hanselmann
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
7340 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7341 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7342 cff90b79 Iustin Pop
        continue
7343 cff90b79 Iustin Pop
7344 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
7345 2bb5c911 Michael Hanselmann
                      (idx, node_name))
7346 2bb5c911 Michael Hanselmann
7347 2bb5c911 Michael Hanselmann
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
7348 2bb5c911 Michael Hanselmann
                                   ldisk=ldisk):
7349 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
7350 2bb5c911 Michael Hanselmann
                                 " replace disks for instance %s" %
7351 2bb5c911 Michael Hanselmann
                                 (node_name, self.instance.name))
7352 2bb5c911 Michael Hanselmann
7353 2bb5c911 Michael Hanselmann
  def _CreateNewStorage(self, node_name):
7354 2bb5c911 Michael Hanselmann
    vgname = self.cfg.GetVGName()
7355 2bb5c911 Michael Hanselmann
    iv_names = {}
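    # iv_names maps each device's iv_name to (device, old_lvs, new_lvs); the
    # _CheckDevices and _RemoveOldStorage helpers take mappings of this shape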
7356 2bb5c911 Michael Hanselmann
7357 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7358 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7359 a9e0c397 Iustin Pop
        continue
7360 2bb5c911 Michael Hanselmann
7361 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
7362 2bb5c911 Michael Hanselmann
7363 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
7364 2bb5c911 Michael Hanselmann
7365 2bb5c911 Michael Hanselmann
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
7366 2bb5c911 Michael Hanselmann
      names = _GenerateUniqueNames(self.lu, lv_names)
7367 2bb5c911 Michael Hanselmann
7368 2bb5c911 Michael Hanselmann
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
7369 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
7370 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7371 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
7372 2bb5c911 Michael Hanselmann
7373 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
7374 a9e0c397 Iustin Pop
      old_lvs = dev.children
7375 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
7376 2bb5c911 Michael Hanselmann
7377 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
7378 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
7379 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
7380 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
7381 2bb5c911 Michael Hanselmann
7382 2bb5c911 Michael Hanselmann
    return iv_names
7383 2bb5c911 Michael Hanselmann
7384 2bb5c911 Michael Hanselmann
  def _CheckDevices(self, node_name, iv_names):
7385 1122eb25 Iustin Pop
    for name, (dev, _, _) in iv_names.iteritems():
7386 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
7387 2bb5c911 Michael Hanselmann
7388 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_find(node_name, dev)
7389 2bb5c911 Michael Hanselmann
7390 2bb5c911 Michael Hanselmann
      msg = result.fail_msg
7391 2bb5c911 Michael Hanselmann
      if msg or not result.payload:
7392 2bb5c911 Michael Hanselmann
        if not msg:
7393 2bb5c911 Michael Hanselmann
          msg = "disk not found"
7394 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
7395 2bb5c911 Michael Hanselmann
                                 (name, msg))
7396 2bb5c911 Michael Hanselmann
7397 96acbc09 Michael Hanselmann
      if result.payload.is_degraded:
7398 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
7399 2bb5c911 Michael Hanselmann
7400 2bb5c911 Michael Hanselmann
  def _RemoveOldStorage(self, node_name, iv_names):
7401 1122eb25 Iustin Pop
    for name, (_, old_lvs, _) in iv_names.iteritems():
7402 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Remove logical volumes for %s" % name)
7403 2bb5c911 Michael Hanselmann
7404 2bb5c911 Michael Hanselmann
      for lv in old_lvs:
7405 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(lv, node_name)
7406 2bb5c911 Michael Hanselmann
7407 2bb5c911 Michael Hanselmann
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
7408 2bb5c911 Michael Hanselmann
        if msg:
7409 2bb5c911 Michael Hanselmann
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
7410 2bb5c911 Michael Hanselmann
                             hint="remove unused LVs manually")
7411 2bb5c911 Michael Hanselmann
7412 7ea7bcf6 Iustin Pop
  def _ReleaseNodeLock(self, node_name):
7413 7ea7bcf6 Iustin Pop
    """Releases the lock for a given node."""
7414 7ea7bcf6 Iustin Pop
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
7415 7ea7bcf6 Iustin Pop
7416 a4eae71f Michael Hanselmann
  def _ExecDrbd8DiskOnly(self, feedback_fn):
7417 2bb5c911 Michael Hanselmann
    """Replace a disk on the primary or secondary for DRBD 8.
7418 2bb5c911 Michael Hanselmann

7419 2bb5c911 Michael Hanselmann
    The algorithm for replace is quite complicated:
7420 2bb5c911 Michael Hanselmann

7421 2bb5c911 Michael Hanselmann
      1. for each disk to be replaced:
7422 2bb5c911 Michael Hanselmann

7423 2bb5c911 Michael Hanselmann
        1. create new LVs on the target node with unique names
7424 2bb5c911 Michael Hanselmann
        1. detach old LVs from the drbd device
7425 2bb5c911 Michael Hanselmann
        1. rename old LVs to name_replaced.<time_t>
7426 2bb5c911 Michael Hanselmann
        1. rename new LVs to old LVs
7427 2bb5c911 Michael Hanselmann
        1. attach the new LVs (with the old names now) to the drbd device
7428 2bb5c911 Michael Hanselmann

7429 2bb5c911 Michael Hanselmann
      1. wait for sync across all devices
7430 2bb5c911 Michael Hanselmann

7431 2bb5c911 Michael Hanselmann
      1. for each modified disk:
7432 2bb5c911 Michael Hanselmann

7433 2bb5c911 Michael Hanselmann
        1. remove old LVs (which have the name name_replaced.<time_t>)
7434 2bb5c911 Michael Hanselmann

7435 2bb5c911 Michael Hanselmann
    Failures are not very well handled.
7436 2bb5c911 Michael Hanselmann

7437 2bb5c911 Michael Hanselmann
    """
7438 2bb5c911 Michael Hanselmann
    steps_total = 6
7439 2bb5c911 Michael Hanselmann
7440 2bb5c911 Michael Hanselmann
    # Step: check device activation
7441 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
7442 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.other_node, self.target_node])
7443 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.target_node, self.other_node])
7444 2bb5c911 Michael Hanselmann
7445 2bb5c911 Michael Hanselmann
    # Step: check other node consistency
7446 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7447 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.other_node,
7448 2bb5c911 Michael Hanselmann
                                self.other_node == self.instance.primary_node,
7449 2bb5c911 Michael Hanselmann
                                False)
7450 2bb5c911 Michael Hanselmann
7451 2bb5c911 Michael Hanselmann
    # Step: create new storage
7452 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7453 2bb5c911 Michael Hanselmann
    iv_names = self._CreateNewStorage(self.target_node)
7454 a9e0c397 Iustin Pop
7455 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
7456 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7457 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
7458 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
7459 2bb5c911 Michael Hanselmann
7460 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
7461 4d4a651d Michael Hanselmann
                                                     old_lvs)
7462 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
7463 2bb5c911 Michael Hanselmann
                   " %s for device %s" % (self.target_node, dev.iv_name))
7464 cff90b79 Iustin Pop
      #dev.children = []
7465 cff90b79 Iustin Pop
      #cfg.Update(instance)
7466 a9e0c397 Iustin Pop
7467 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
7468 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
7469 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
7470 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
7471 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
7472 cff90b79 Iustin Pop
7473 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
7474 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
7475 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
7476 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
7477 2bb5c911 Michael Hanselmann
7478 2bb5c911 Michael Hanselmann
      # Build the rename list based on what LVs exist on the node
7479 2bb5c911 Michael Hanselmann
      rename_old_to_new = []
7480 cff90b79 Iustin Pop
      for to_ren in old_lvs:
7481 2bb5c911 Michael Hanselmann
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
7482 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
7483 23829f6f Iustin Pop
          # device exists
7484 2bb5c911 Michael Hanselmann
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
7485 cff90b79 Iustin Pop
7486 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the old LVs on the target node")
7487 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
7488 4d4a651d Michael Hanselmann
                                             rename_old_to_new)
7489 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
7490 2bb5c911 Michael Hanselmann
7491 2bb5c911 Michael Hanselmann
      # Now we rename the new LVs to the old LVs
7492 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the new LVs on the target node")
7493 2bb5c911 Michael Hanselmann
      rename_new_to_old = [(new, old.physical_id)
7494 2bb5c911 Michael Hanselmann
                           for old, new in zip(old_lvs, new_lvs)]
7495 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
7496 4d4a651d Michael Hanselmann
                                             rename_new_to_old)
7497 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
7498 cff90b79 Iustin Pop
7499 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
7500 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
7501 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(new, self.target_node)
7502 a9e0c397 Iustin Pop
7503 cff90b79 Iustin Pop
      for disk in old_lvs:
7504 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
7505 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(disk, self.target_node)
7506 a9e0c397 Iustin Pop
7507 2bb5c911 Michael Hanselmann
      # Now that the new lvs have the old name, we can add them to the device
7508 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
7509 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
7510 4d4a651d Michael Hanselmann
                                                  new_lvs)
7511 4c4e4e1e Iustin Pop
      msg = result.fail_msg
7512 2cc1da8b Iustin Pop
      if msg:
7513 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
7514 4d4a651d Michael Hanselmann
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
7515 4d4a651d Michael Hanselmann
                                               new_lv).fail_msg
7516 4c4e4e1e Iustin Pop
          if msg2:
7517 2bb5c911 Michael Hanselmann
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
7518 2bb5c911 Michael Hanselmann
                               hint=("cleanup manually the unused logical"
7519 2bb5c911 Michael Hanselmann
                                     "volumes"))
7520 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
7521 a9e0c397 Iustin Pop
7522 a9e0c397 Iustin Pop
      dev.children = new_lvs
7523 a9e0c397 Iustin Pop
7524 a4eae71f Michael Hanselmann
      self.cfg.Update(self.instance, feedback_fn)
7525 a9e0c397 Iustin Pop
7526 7ea7bcf6 Iustin Pop
    cstep = 5
7527 7ea7bcf6 Iustin Pop
    if self.early_release:
7528 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7529 7ea7bcf6 Iustin Pop
      cstep += 1
7530 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7531 d5cd389c Iustin Pop
      # WARNING: we release both node locks here, do not do other RPCs
7532 d5cd389c Iustin Pop
      # than WaitForSync to the primary node
7533 d5cd389c Iustin Pop
      self._ReleaseNodeLock([self.target_node, self.other_node])
7534 7ea7bcf6 Iustin Pop
7535 2bb5c911 Michael Hanselmann
    # Wait for sync
7536 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
7537 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
7538 7ea7bcf6 Iustin Pop
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7539 7ea7bcf6 Iustin Pop
    cstep += 1
7540 b6c07b79 Michael Hanselmann
    _WaitForSync(self.lu, self.instance)
7541 a9e0c397 Iustin Pop
7542 2bb5c911 Michael Hanselmann
    # Check all devices manually
7543 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
7544 a9e0c397 Iustin Pop
7545 cff90b79 Iustin Pop
    # Step: remove old storage
7546 7ea7bcf6 Iustin Pop
    if not self.early_release:
7547 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7548 7ea7bcf6 Iustin Pop
      cstep += 1
7549 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7550 a9e0c397 Iustin Pop
7551 a4eae71f Michael Hanselmann
  def _ExecDrbd8Secondary(self, feedback_fn):
7552 2bb5c911 Michael Hanselmann
    """Replace the secondary node for DRBD 8.
7553 a9e0c397 Iustin Pop

7554 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
7555 a9e0c397 Iustin Pop
      - for all disks of the instance:
7556 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
7557 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
7558 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
7559 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
7560 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
7561 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
7562 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
7563 a9e0c397 Iustin Pop
          not network enabled
7564 a9e0c397 Iustin Pop
      - wait for sync across all devices
7565 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
7566 a9e0c397 Iustin Pop

7567 a9e0c397 Iustin Pop
    Failures are not very well handled.
7568 0834c866 Iustin Pop

7569 a9e0c397 Iustin Pop
    """
7570 0834c866 Iustin Pop
    steps_total = 6
7571 0834c866 Iustin Pop
7572 0834c866 Iustin Pop
    # Step: check device activation
7573 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
7574 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.instance.primary_node])
7575 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.instance.primary_node])
7576 0834c866 Iustin Pop
7577 0834c866 Iustin Pop
    # Step: check other node consistency
7578 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7579 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
7580 0834c866 Iustin Pop
7581 0834c866 Iustin Pop
    # Step: create new storage
7582 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7583 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7584 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
7585 2bb5c911 Michael Hanselmann
                      (self.new_node, idx))
7586 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
7587 a9e0c397 Iustin Pop
      for new_lv in dev.children:
7588 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
7589 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
7590 a9e0c397 Iustin Pop
7591 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
7592 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
7593 a1578d63 Iustin Pop
    # error and the success paths
7594 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7595 4d4a651d Michael Hanselmann
    minors = self.cfg.AllocateDRBDMinor([self.new_node
7596 4d4a651d Michael Hanselmann
                                         for dev in self.instance.disks],
7597 2bb5c911 Michael Hanselmann
                                        self.instance.name)
7598 099c52ad Iustin Pop
    logging.debug("Allocated minors %r", minors)
7599 2bb5c911 Michael Hanselmann
7600 2bb5c911 Michael Hanselmann
    iv_names = {}
7601 2bb5c911 Michael Hanselmann
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
7602 4d4a651d Michael Hanselmann
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
7603 4d4a651d Michael Hanselmann
                      (self.new_node, idx))
7604 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
7605 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
7606 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
7607 a2d59d8b Iustin Pop
      # with network, for the latter activation in step 4
7608 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
7609 2bb5c911 Michael Hanselmann
      if self.instance.primary_node == o_node1:
7610 a2d59d8b Iustin Pop
        p_minor = o_minor1
7611 ffa1c0dc Iustin Pop
      else:
7612 1122eb25 Iustin Pop
        assert self.instance.primary_node == o_node2, "Three-node instance?"
7613 a2d59d8b Iustin Pop
        p_minor = o_minor2
7614 a2d59d8b Iustin Pop
7615 4d4a651d Michael Hanselmann
      new_alone_id = (self.instance.primary_node, self.new_node, None,
7616 4d4a651d Michael Hanselmann
                      p_minor, new_minor, o_secret)
7617 4d4a651d Michael Hanselmann
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
7618 4d4a651d Michael Hanselmann
                    p_minor, new_minor, o_secret)
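      # Hypothetical example of the two IDs built above (comments only):
      #   new_alone_id = ("node1", "node3", None,  0, 1, "secret")
      #   new_net_id   = ("node1", "node3", 11000, 0, 1, "secret")
      # The port-less variant lets the device come up standalone on the new
      # node; the full variant is what ends up in the configuration.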
7619 a2d59d8b Iustin Pop
7620 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
7621 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
7622 a2d59d8b Iustin Pop
                    new_net_id)
7623 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
7624 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
7625 8a6c7011 Iustin Pop
                              children=dev.children,
7626 8a6c7011 Iustin Pop
                              size=dev.size)
7627 796cab27 Iustin Pop
      try:
7628 2bb5c911 Michael Hanselmann
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
7629 2bb5c911 Michael Hanselmann
                              _GetInstanceInfoText(self.instance), False)
7630 82759cb1 Iustin Pop
      except errors.GenericError:
7631 2bb5c911 Michael Hanselmann
        self.cfg.ReleaseDRBDMinors(self.instance.name)
7632 796cab27 Iustin Pop
        raise
7633 a9e0c397 Iustin Pop
7634 2bb5c911 Michael Hanselmann
    # We have new devices, shutdown the drbd on the old secondary
7635 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7636 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
7637 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.target_node)
7638 2bb5c911 Michael Hanselmann
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
7639 cacfd1fd Iustin Pop
      if msg:
7640 2bb5c911 Michael Hanselmann
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
7641 2bb5c911 Michael Hanselmann
                           "node: %s" % (idx, msg),
7642 2bb5c911 Michael Hanselmann
                           hint=("Please cleanup this device manually as"
7643 2bb5c911 Michael Hanselmann
                                 " soon as possible"))
7644 a9e0c397 Iustin Pop
7645 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
7646 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
7647 4d4a651d Michael Hanselmann
                                               self.node_secondary_ip,
7648 4d4a651d Michael Hanselmann
                                               self.instance.disks)\
7649 4d4a651d Michael Hanselmann
                                              [self.instance.primary_node]
7650 642445d9 Iustin Pop
7651 4c4e4e1e Iustin Pop
    msg = result.fail_msg
7652 a2d59d8b Iustin Pop
    if msg:
7653 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
7654 2bb5c911 Michael Hanselmann
      self.cfg.ReleaseDRBDMinors(self.instance.name)
7655 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
7656 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
7657 642445d9 Iustin Pop
7658 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
7659 642445d9 Iustin Pop
    # the instance to point to the new secondary
7660 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Updating instance configuration")
7661 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
7662 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
7663 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.instance.primary_node)
7664 2bb5c911 Michael Hanselmann
7665 a4eae71f Michael Hanselmann
    self.cfg.Update(self.instance, feedback_fn)
7666 a9e0c397 Iustin Pop
7667 642445d9 Iustin Pop
    # and now perform the drbd attach
7668 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Attaching primary drbds to new secondary"
7669 2bb5c911 Michael Hanselmann
                    " (standalone => connected)")
7670 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
7671 4d4a651d Michael Hanselmann
                                            self.new_node],
7672 4d4a651d Michael Hanselmann
                                           self.node_secondary_ip,
7673 4d4a651d Michael Hanselmann
                                           self.instance.disks,
7674 4d4a651d Michael Hanselmann
                                           self.instance.name,
7675 a2d59d8b Iustin Pop
                                           False)
7676 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
7677 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
7678 a2d59d8b Iustin Pop
      if msg:
7679 4d4a651d Michael Hanselmann
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
7680 4d4a651d Michael Hanselmann
                           to_node, msg,
7681 2bb5c911 Michael Hanselmann
                           hint=("please do a gnt-instance info to see the"
7682 2bb5c911 Michael Hanselmann
                                 " status of disks"))
7683 7ea7bcf6 Iustin Pop
    cstep = 5
7684 7ea7bcf6 Iustin Pop
    if self.early_release:
7685 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7686 7ea7bcf6 Iustin Pop
      cstep += 1
7687 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7688 d5cd389c Iustin Pop
      # WARNING: we release all node locks here, do not do other RPCs
7689 d5cd389c Iustin Pop
      # than WaitForSync to the primary node
7690 d5cd389c Iustin Pop
      self._ReleaseNodeLock([self.instance.primary_node,
7691 d5cd389c Iustin Pop
                             self.target_node,
7692 d5cd389c Iustin Pop
                             self.new_node])
7693 a9e0c397 Iustin Pop
7694 2bb5c911 Michael Hanselmann
    # Wait for sync
7695 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
7696 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
7697 7ea7bcf6 Iustin Pop
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7698 7ea7bcf6 Iustin Pop
    cstep += 1
7699 b6c07b79 Michael Hanselmann
    _WaitForSync(self.lu, self.instance)
7700 a9e0c397 Iustin Pop
7701 2bb5c911 Michael Hanselmann
    # Check all devices manually
7702 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
7703 22985314 Guido Trotter
7704 2bb5c911 Michael Hanselmann
    # Step: remove old storage
7705 7ea7bcf6 Iustin Pop
    if not self.early_release:
7706 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7707 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7708 a9e0c397 Iustin Pop
7709 a8083063 Iustin Pop
7710 76aef8fc Michael Hanselmann
class LURepairNodeStorage(NoHooksLU):
7711 76aef8fc Michael Hanselmann
  """Repairs the volume group on a node.
7712 76aef8fc Michael Hanselmann

7713 76aef8fc Michael Hanselmann
  """
7714 76aef8fc Michael Hanselmann
  _OP_REQP = ["node_name"]
7715 76aef8fc Michael Hanselmann
  REQ_BGL = False
7716 76aef8fc Michael Hanselmann
7717 76aef8fc Michael Hanselmann
  def CheckArguments(self):
7718 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
7719 76aef8fc Michael Hanselmann
7720 0e3baaf3 Iustin Pop
    _CheckStorageType(self.op.storage_type)
7721 0e3baaf3 Iustin Pop
7722 76aef8fc Michael Hanselmann
  def ExpandNames(self):
7723 76aef8fc Michael Hanselmann
    self.needed_locks = {
7724 76aef8fc Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
7725 76aef8fc Michael Hanselmann
      }
7726 76aef8fc Michael Hanselmann
7727 76aef8fc Michael Hanselmann
  def _CheckFaultyDisks(self, instance, node_name):
7728 7e9c6a78 Iustin Pop
    """Ensure faulty disks abort the opcode or at least warn."""
7729 7e9c6a78 Iustin Pop
    try:
7730 7e9c6a78 Iustin Pop
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
7731 7e9c6a78 Iustin Pop
                                  node_name, True):
7732 7e9c6a78 Iustin Pop
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
7733 7e9c6a78 Iustin Pop
                                   " node '%s'" % (instance.name, node_name),
7734 7e9c6a78 Iustin Pop
                                   errors.ECODE_STATE)
7735 7e9c6a78 Iustin Pop
    except errors.OpPrereqError, err:
7736 7e9c6a78 Iustin Pop
      if self.op.ignore_consistency:
7737 7e9c6a78 Iustin Pop
        self.proc.LogWarning(str(err.args[0]))
7738 7e9c6a78 Iustin Pop
      else:
7739 7e9c6a78 Iustin Pop
        raise
7740 76aef8fc Michael Hanselmann
7741 76aef8fc Michael Hanselmann
  def CheckPrereq(self):
7742 76aef8fc Michael Hanselmann
    """Check prerequisites.
7743 76aef8fc Michael Hanselmann

7744 76aef8fc Michael Hanselmann
    """
7745 76aef8fc Michael Hanselmann
    storage_type = self.op.storage_type
7746 76aef8fc Michael Hanselmann
7747 76aef8fc Michael Hanselmann
    if (constants.SO_FIX_CONSISTENCY not in
7748 76aef8fc Michael Hanselmann
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
7749 76aef8fc Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
7750 5c983ee5 Iustin Pop
                                 " repaired" % storage_type,
7751 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7752 76aef8fc Michael Hanselmann
7753 76aef8fc Michael Hanselmann
    # Check whether any instance on this node has faulty disks
7754 76aef8fc Michael Hanselmann
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
7755 7e9c6a78 Iustin Pop
      if not inst.admin_up:
7756 7e9c6a78 Iustin Pop
        continue
7757 76aef8fc Michael Hanselmann
      check_nodes = set(inst.all_nodes)
7758 76aef8fc Michael Hanselmann
      check_nodes.discard(self.op.node_name)
7759 76aef8fc Michael Hanselmann
      for inst_node_name in check_nodes:
7760 76aef8fc Michael Hanselmann
        self._CheckFaultyDisks(inst, inst_node_name)
7761 76aef8fc Michael Hanselmann
7762 76aef8fc Michael Hanselmann
  def Exec(self, feedback_fn):
7763 76aef8fc Michael Hanselmann
    feedback_fn("Repairing storage unit '%s' on %s ..." %
7764 76aef8fc Michael Hanselmann
                (self.op.name, self.op.node_name))
7765 76aef8fc Michael Hanselmann
7766 76aef8fc Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
7767 76aef8fc Michael Hanselmann
    result = self.rpc.call_storage_execute(self.op.node_name,
7768 76aef8fc Michael Hanselmann
                                           self.op.storage_type, st_args,
7769 76aef8fc Michael Hanselmann
                                           self.op.name,
7770 76aef8fc Michael Hanselmann
                                           constants.SO_FIX_CONSISTENCY)
7771 76aef8fc Michael Hanselmann
    result.Raise("Failed to repair storage unit '%s' on %s" %
7772 76aef8fc Michael Hanselmann
                 (self.op.name, self.op.node_name))
7773 76aef8fc Michael Hanselmann
7774 76aef8fc Michael Hanselmann
7775 f7e7689f Iustin Pop
class LUNodeEvacuationStrategy(NoHooksLU):
7776 f7e7689f Iustin Pop
  """Computes the node evacuation strategy.
7777 f7e7689f Iustin Pop

7778 f7e7689f Iustin Pop
  """
7779 f7e7689f Iustin Pop
  _OP_REQP = ["nodes"]
7780 f7e7689f Iustin Pop
  REQ_BGL = False
7781 f7e7689f Iustin Pop
7782 f7e7689f Iustin Pop
  def CheckArguments(self):
7783 f7e7689f Iustin Pop
    if not hasattr(self.op, "remote_node"):
7784 f7e7689f Iustin Pop
      self.op.remote_node = None
7785 f7e7689f Iustin Pop
    if not hasattr(self.op, "iallocator"):
7786 f7e7689f Iustin Pop
      self.op.iallocator = None
7787 f7e7689f Iustin Pop
    if self.op.remote_node is not None and self.op.iallocator is not None:
7788 f7e7689f Iustin Pop
      raise errors.OpPrereqError("Give either the iallocator or the new"
7789 f7e7689f Iustin Pop
                                 " secondary, not both", errors.ECODE_INVAL)
7790 f7e7689f Iustin Pop
7791 f7e7689f Iustin Pop
  def ExpandNames(self):
7792 f7e7689f Iustin Pop
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
7793 f7e7689f Iustin Pop
    self.needed_locks = locks = {}
7794 f7e7689f Iustin Pop
    if self.op.remote_node is None:
7795 f7e7689f Iustin Pop
      locks[locking.LEVEL_NODE] = locking.ALL_SET
7796 f7e7689f Iustin Pop
    else:
7797 f7e7689f Iustin Pop
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7798 f7e7689f Iustin Pop
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
7799 f7e7689f Iustin Pop
7800 f7e7689f Iustin Pop
  def CheckPrereq(self):
7801 f7e7689f Iustin Pop
    pass
7802 f7e7689f Iustin Pop
7803 f7e7689f Iustin Pop
  def Exec(self, feedback_fn):
7804 f7e7689f Iustin Pop
    if self.op.remote_node is not None:
7805 f7e7689f Iustin Pop
      instances = []
7806 f7e7689f Iustin Pop
      for node in self.op.nodes:
7807 f7e7689f Iustin Pop
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
7808 f7e7689f Iustin Pop
      result = []
7809 f7e7689f Iustin Pop
      for i in instances:
7810 f7e7689f Iustin Pop
        if i.primary_node == self.op.remote_node:
7811 f7e7689f Iustin Pop
          raise errors.OpPrereqError("Node %s is the primary node of"
7812 f7e7689f Iustin Pop
                                     " instance %s, cannot use it as"
7813 f7e7689f Iustin Pop
                                     " secondary" %
7814 f7e7689f Iustin Pop
                                     (self.op.remote_node, i.name),
7815 f7e7689f Iustin Pop
                                     errors.ECODE_INVAL)
7816 f7e7689f Iustin Pop
        result.append([i.name, self.op.remote_node])
7817 f7e7689f Iustin Pop
    else:
7818 f7e7689f Iustin Pop
      ial = IAllocator(self.cfg, self.rpc,
7819 f7e7689f Iustin Pop
                       mode=constants.IALLOCATOR_MODE_MEVAC,
7820 f7e7689f Iustin Pop
                       evac_nodes=self.op.nodes)
7821 f7e7689f Iustin Pop
      ial.Run(self.op.iallocator, validate=True)
7822 f7e7689f Iustin Pop
      if not ial.success:
7823 f7e7689f Iustin Pop
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
7824 f7e7689f Iustin Pop
                                 errors.ECODE_NORES)
7825 f7e7689f Iustin Pop
      result = ial.result
7826 f7e7689f Iustin Pop
    return result
7827 f7e7689f Iustin Pop
7828 f7e7689f Iustin Pop
7829 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
7830 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
7831 8729e0d7 Iustin Pop

7832 8729e0d7 Iustin Pop
  """
7833 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
7834 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7835 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
7836 31e63dbf Guido Trotter
  REQ_BGL = False
7837 31e63dbf Guido Trotter
7838 31e63dbf Guido Trotter
  def ExpandNames(self):
7839 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
7840 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7841 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7842 31e63dbf Guido Trotter
7843 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
7844 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
7845 31e63dbf Guido Trotter
      self._LockInstancesNodes()
7846 8729e0d7 Iustin Pop
7847 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
7848 8729e0d7 Iustin Pop
    """Build hooks env.
7849 8729e0d7 Iustin Pop

7850 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
7851 8729e0d7 Iustin Pop

7852 8729e0d7 Iustin Pop
    """
7853 8729e0d7 Iustin Pop
    env = {
7854 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
7855 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
7856 8729e0d7 Iustin Pop
      }
7857 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7858 abd8e836 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7859 8729e0d7 Iustin Pop
    return env, nl, nl
7860 8729e0d7 Iustin Pop
7861 8729e0d7 Iustin Pop
  def CheckPrereq(self):
7862 8729e0d7 Iustin Pop
    """Check prerequisites.
7863 8729e0d7 Iustin Pop

7864 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
7865 8729e0d7 Iustin Pop

7866 8729e0d7 Iustin Pop
    """
7867 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7868 31e63dbf Guido Trotter
    assert instance is not None, \
7869 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
7870 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
7871 6b12959c Iustin Pop
    for node in nodenames:
7872 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
7873 7527a8a4 Iustin Pop
7874 31e63dbf Guido Trotter
7875 8729e0d7 Iustin Pop
    self.instance = instance
7876 8729e0d7 Iustin Pop
7877 728489a3 Guido Trotter
    if instance.disk_template not in constants.DTS_GROWABLE:
7878 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
7879 5c983ee5 Iustin Pop
                                 " growing.", errors.ECODE_INVAL)
7880 8729e0d7 Iustin Pop
7881 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
7882 8729e0d7 Iustin Pop
7883 2c42c5df Guido Trotter
    if instance.disk_template != constants.DT_FILE:
7884 2c42c5df Guido Trotter
      # TODO: check the free disk space for file, when that feature will be
7885 2c42c5df Guido Trotter
      # supported
7886 2c42c5df Guido Trotter
      _CheckNodesFreeDisk(self, nodenames, self.op.amount)
7887 8729e0d7 Iustin Pop
7888 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
7889 8729e0d7 Iustin Pop
    """Execute disk grow.
7890 8729e0d7 Iustin Pop

7891 8729e0d7 Iustin Pop
    """
7892 8729e0d7 Iustin Pop
    instance = self.instance
7893 ad24e046 Iustin Pop
    disk = self.disk
7894 6b12959c Iustin Pop
    for node in instance.all_nodes:
7895 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
7896 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
7897 4c4e4e1e Iustin Pop
      result.Raise("Grow request failed to node %s" % node)
7898 5bc556dd Michael Hanselmann
7899 5bc556dd Michael Hanselmann
      # TODO: Rewrite code to work properly
7900 5bc556dd Michael Hanselmann
      # DRBD goes into sync mode for a short amount of time after executing the
7901 5bc556dd Michael Hanselmann
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
7902 5bc556dd Michael Hanselmann
      # calling "resize" in sync mode fails. Sleeping for a short amount of
7903 5bc556dd Michael Hanselmann
      # time is a work-around.
7904 5bc556dd Michael Hanselmann
      time.sleep(5)
7905 5bc556dd Michael Hanselmann
7906 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
7907 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
7908 6605411d Iustin Pop
    if self.op.wait_for_sync:
7909 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
7910 6605411d Iustin Pop
      if disk_abort:
7911 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
7912 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
7913 8729e0d7 Iustin Pop
7914 8729e0d7 Iustin Pop
7915 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
7916 a8083063 Iustin Pop
  """Query runtime instance data.
7917 a8083063 Iustin Pop

7918 a8083063 Iustin Pop
  """
7919 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
7920 a987fa48 Guido Trotter
  REQ_BGL = False
7921 ae5849b5 Michael Hanselmann
7922 a987fa48 Guido Trotter
  def ExpandNames(self):
7923 a987fa48 Guido Trotter
    self.needed_locks = {}
7924 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
7925 a987fa48 Guido Trotter
7926 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
7927 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
7928 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7929 a987fa48 Guido Trotter
7930 a987fa48 Guido Trotter
    if self.op.instances:
7931 a987fa48 Guido Trotter
      self.wanted_names = []
7932 a987fa48 Guido Trotter
      for name in self.op.instances:
7933 cf26a87a Iustin Pop
        full_name = _ExpandInstanceName(self.cfg, name)
7934 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
7935 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
7936 a987fa48 Guido Trotter
    else:
7937 a987fa48 Guido Trotter
      self.wanted_names = None
7938 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
7939 a987fa48 Guido Trotter
7940 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7941 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7942 a987fa48 Guido Trotter
7943 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
7944 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
7945 a987fa48 Guido Trotter
      self._LockInstancesNodes()
7946 a8083063 Iustin Pop
7947 a8083063 Iustin Pop
  def CheckPrereq(self):
7948 a8083063 Iustin Pop
    """Check prerequisites.
7949 a8083063 Iustin Pop

7950 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
7951 a8083063 Iustin Pop

7952 a8083063 Iustin Pop
    """
7953 a987fa48 Guido Trotter
    if self.wanted_names is None:
7954 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
7955 a8083063 Iustin Pop
7956 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
7957 a987fa48 Guido Trotter
                             in self.wanted_names]
7958 a987fa48 Guido Trotter
    return
7959 a8083063 Iustin Pop
7960 98825740 Michael Hanselmann
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
7961 98825740 Michael Hanselmann
    """Returns the status of a block device
7962 98825740 Michael Hanselmann

7963 98825740 Michael Hanselmann
    """
7964 4dce1a83 Michael Hanselmann
    if self.op.static or not node:
7965 98825740 Michael Hanselmann
      return None
7966 98825740 Michael Hanselmann
7967 98825740 Michael Hanselmann
    self.cfg.SetDiskID(dev, node)
7968 98825740 Michael Hanselmann
7969 98825740 Michael Hanselmann
    result = self.rpc.call_blockdev_find(node, dev)
7970 98825740 Michael Hanselmann
    if result.offline:
7971 98825740 Michael Hanselmann
      return None
7972 98825740 Michael Hanselmann
7973 98825740 Michael Hanselmann
    result.Raise("Can't compute disk status for %s" % instance_name)
7974 98825740 Michael Hanselmann
7975 98825740 Michael Hanselmann
    status = result.payload
7976 ddfe2228 Michael Hanselmann
    if status is None:
7977 ddfe2228 Michael Hanselmann
      return None
7978 98825740 Michael Hanselmann
7979 98825740 Michael Hanselmann
    return (status.dev_path, status.major, status.minor,
7980 98825740 Michael Hanselmann
            status.sync_percent, status.estimated_time,
7981 f208978a Michael Hanselmann
            status.is_degraded, status.ldisk_status)
7982 98825740 Michael Hanselmann
7983 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
7984 a8083063 Iustin Pop
    """Compute block device status.
7985 a8083063 Iustin Pop

7986 a8083063 Iustin Pop
    """
7987 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
7988 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
7989 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
7990 a8083063 Iustin Pop
        snode = dev.logical_id[1]
7991 a8083063 Iustin Pop
      else:
7992 a8083063 Iustin Pop
        snode = dev.logical_id[0]
7993 a8083063 Iustin Pop
7994 98825740 Michael Hanselmann
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
7995 98825740 Michael Hanselmann
                                              instance.name, dev)
7996 98825740 Michael Hanselmann
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
7997 a8083063 Iustin Pop
7998 a8083063 Iustin Pop
    if dev.children:
7999 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
8000 a8083063 Iustin Pop
                      for child in dev.children]
8001 a8083063 Iustin Pop
    else:
8002 a8083063 Iustin Pop
      dev_children = []
8003 a8083063 Iustin Pop
8004 a8083063 Iustin Pop
    data = {
8005 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
8006 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
8007 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
8008 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
8009 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
8010 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
8011 a8083063 Iustin Pop
      "children": dev_children,
8012 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
8013 c98162a7 Iustin Pop
      "size": dev.size,
8014 a8083063 Iustin Pop
      }
8015 a8083063 Iustin Pop
8016 a8083063 Iustin Pop
    return data
8017 a8083063 Iustin Pop
8018 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8019 a8083063 Iustin Pop
    """Gather and return data"""
8020 a8083063 Iustin Pop
    result = {}
8021 338e51e8 Iustin Pop
8022 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
8023 338e51e8 Iustin Pop
8024 a8083063 Iustin Pop
    for instance in self.wanted_instances:
8025 57821cac Iustin Pop
      if not self.op.static:
8026 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
8027 57821cac Iustin Pop
                                                  instance.name,
8028 57821cac Iustin Pop
                                                  instance.hypervisor)
8029 4c4e4e1e Iustin Pop
        remote_info.Raise("Error checking node %s" % instance.primary_node)
8030 7ad1af4a Iustin Pop
        remote_info = remote_info.payload
8031 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
8032 57821cac Iustin Pop
          remote_state = "up"
8033 57821cac Iustin Pop
        else:
8034 57821cac Iustin Pop
          remote_state = "down"
8035 a8083063 Iustin Pop
      else:
8036 57821cac Iustin Pop
        remote_state = None
8037 0d68c45d Iustin Pop
      if instance.admin_up:
8038 a8083063 Iustin Pop
        config_state = "up"
8039 0d68c45d Iustin Pop
      else:
8040 0d68c45d Iustin Pop
        config_state = "down"
8041 a8083063 Iustin Pop
8042 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
8043 a8083063 Iustin Pop
               for device in instance.disks]
8044 a8083063 Iustin Pop
8045 a8083063 Iustin Pop
      idict = {
8046 a8083063 Iustin Pop
        "name": instance.name,
8047 a8083063 Iustin Pop
        "config_state": config_state,
8048 a8083063 Iustin Pop
        "run_state": remote_state,
8049 a8083063 Iustin Pop
        "pnode": instance.primary_node,
8050 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
8051 a8083063 Iustin Pop
        "os": instance.os,
8052 0b13832c Guido Trotter
        # this happens to be the same format used for hooks
8053 0b13832c Guido Trotter
        "nics": _NICListToTuple(self, instance.nics),
8054 a8083063 Iustin Pop
        "disks": disks,
8055 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
8056 24838135 Iustin Pop
        "network_port": instance.network_port,
8057 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
8058 7736a5f2 Iustin Pop
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
8059 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
8060 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
8061 90f72445 Iustin Pop
        "serial_no": instance.serial_no,
8062 90f72445 Iustin Pop
        "mtime": instance.mtime,
8063 90f72445 Iustin Pop
        "ctime": instance.ctime,
8064 033d58b0 Iustin Pop
        "uuid": instance.uuid,
8065 a8083063 Iustin Pop
        }
8066 a8083063 Iustin Pop
8067 a8083063 Iustin Pop
      result[instance.name] = idict
8068 a8083063 Iustin Pop
8069 a8083063 Iustin Pop
    return result
8070 a8083063 Iustin Pop
8071 a8083063 Iustin Pop
8072 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
8073 a8083063 Iustin Pop
  """Modifies an instances's parameters.
8074 a8083063 Iustin Pop

8075 a8083063 Iustin Pop
  """
8076 a8083063 Iustin Pop
  HPATH = "instance-modify"
8077 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
8078 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
8079 1a5c7281 Guido Trotter
  REQ_BGL = False
8080 1a5c7281 Guido Trotter
8081 24991749 Iustin Pop
  def CheckArguments(self):
8082 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
8083 24991749 Iustin Pop
      self.op.nics = []
8084 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
8085 24991749 Iustin Pop
      self.op.disks = []
8086 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
8087 24991749 Iustin Pop
      self.op.beparams = {}
8088 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
8089 24991749 Iustin Pop
      self.op.hvparams = {}
8090 e29e9550 Iustin Pop
    if not hasattr(self.op, "disk_template"):
8091 e29e9550 Iustin Pop
      self.op.disk_template = None
8092 e29e9550 Iustin Pop
    if not hasattr(self.op, "remote_node"):
8093 e29e9550 Iustin Pop
      self.op.remote_node = None
8094 96b39bcc Iustin Pop
    if not hasattr(self.op, "os_name"):
8095 96b39bcc Iustin Pop
      self.op.os_name = None
8096 96b39bcc Iustin Pop
    if not hasattr(self.op, "force_variant"):
8097 96b39bcc Iustin Pop
      self.op.force_variant = False
8098 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
8099 e29e9550 Iustin Pop
    if not (self.op.nics or self.op.disks or self.op.disk_template or
8100 96b39bcc Iustin Pop
            self.op.hvparams or self.op.beparams or self.op.os_name):
8101 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
8102 24991749 Iustin Pop
8103 7736a5f2 Iustin Pop
    if self.op.hvparams:
8104 7736a5f2 Iustin Pop
      _CheckGlobalHvParams(self.op.hvparams)
8105 7736a5f2 Iustin Pop
8106 24991749 Iustin Pop
    # Disk validation
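    # self.op.disks is a list of (operation, parameters) pairs, e.g.
    # (constants.DDM_ADD, {'size': 1024, 'mode': 'rw'}) to add a disk,
    # (constants.DDM_REMOVE, {}) to remove one, or (0, {...}) to modify
    # disk 0 (values here are purely illustrative).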
8107 24991749 Iustin Pop
    disk_addremove = 0
8108 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
8109 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8110 24991749 Iustin Pop
        disk_addremove += 1
8111 24991749 Iustin Pop
        continue
8112 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
8113 24991749 Iustin Pop
        disk_addremove += 1
8114 24991749 Iustin Pop
      else:
8115 24991749 Iustin Pop
        if not isinstance(disk_op, int):
8116 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
8117 8b46606c Guido Trotter
        if not isinstance(disk_dict, dict):
8118 8b46606c Guido Trotter
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
8119 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
8120 8b46606c Guido Trotter
8121 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
8122 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
8123 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
8124 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
8125 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8126 24991749 Iustin Pop
        size = disk_dict.get('size', None)
8127 24991749 Iustin Pop
        if size is None:
8128 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing",
8129 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8130 24991749 Iustin Pop
        try:
8131 24991749 Iustin Pop
          size = int(size)
8132 691744c4 Iustin Pop
        except (TypeError, ValueError), err:
8133 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
8134 5c983ee5 Iustin Pop
                                     str(err), errors.ECODE_INVAL)
8135 24991749 Iustin Pop
        disk_dict['size'] = size
8136 24991749 Iustin Pop
      else:
8137 24991749 Iustin Pop
        # modification of disk
8138 24991749 Iustin Pop
        if 'size' in disk_dict:
8139 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
8140 5c983ee5 Iustin Pop
                                     " grow-disk", errors.ECODE_INVAL)
8141 24991749 Iustin Pop
8142 24991749 Iustin Pop
    if disk_addremove > 1:
8143 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
8144 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
8145 24991749 Iustin Pop
8146 e29e9550 Iustin Pop
    if self.op.disks and self.op.disk_template is not None:
8147 e29e9550 Iustin Pop
      raise errors.OpPrereqError("Disk template conversion and other disk"
8148 e29e9550 Iustin Pop
                                 " changes not supported at the same time",
8149 e29e9550 Iustin Pop
                                 errors.ECODE_INVAL)
8150 e29e9550 Iustin Pop
8151 e29e9550 Iustin Pop
    if self.op.disk_template:
8152 e29e9550 Iustin Pop
      _CheckDiskTemplate(self.op.disk_template)
8153 e29e9550 Iustin Pop
      if (self.op.disk_template in constants.DTS_NET_MIRROR and
8154 e29e9550 Iustin Pop
          self.op.remote_node is None):
8155 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Changing the disk template to a mirrored"
8156 e29e9550 Iustin Pop
                                   " one requires specifying a secondary node",
8157 e29e9550 Iustin Pop
                                   errors.ECODE_INVAL)
8158 e29e9550 Iustin Pop
8159 24991749 Iustin Pop
    # NIC validation
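    # self.op.nics uses the same (operation, parameters) convention, e.g.
    # (constants.DDM_ADD, {'mac': constants.VALUE_AUTO, 'ip': None}) or
    # (0, {'link': 'br0'}) to rewire the first NIC (illustrative values;
    # 'bridge' and 'link' are mutually exclusive, as checked below).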
8160 24991749 Iustin Pop
    nic_addremove = 0
8161 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8162 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8163 24991749 Iustin Pop
        nic_addremove += 1
8164 24991749 Iustin Pop
        continue
8165 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
8166 24991749 Iustin Pop
        nic_addremove += 1
8167 24991749 Iustin Pop
      else:
8168 24991749 Iustin Pop
        if not isinstance(nic_op, int):
8169 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
8170 8b46606c Guido Trotter
        if not isinstance(nic_dict, dict):
8171 8b46606c Guido Trotter
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
8172 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
8173 24991749 Iustin Pop
8174 24991749 Iustin Pop
      # nic_dict should be a dict
8175 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
8176 24991749 Iustin Pop
      if nic_ip is not None:
8177 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
8178 24991749 Iustin Pop
          nic_dict['ip'] = None
8179 24991749 Iustin Pop
        else:
8180 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
8181 5c983ee5 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
8182 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
8183 5c44da6a Guido Trotter
8184 cd098c41 Guido Trotter
      nic_bridge = nic_dict.get('bridge', None)
8185 cd098c41 Guido Trotter
      nic_link = nic_dict.get('link', None)
8186 cd098c41 Guido Trotter
      if nic_bridge and nic_link:
8187 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
8188 5c983ee5 Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
8189 cd098c41 Guido Trotter
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
8190 cd098c41 Guido Trotter
        nic_dict['bridge'] = None
8191 cd098c41 Guido Trotter
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
8192 cd098c41 Guido Trotter
        nic_dict['link'] = None
8193 cd098c41 Guido Trotter
8194 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
8195 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
8196 5c44da6a Guido Trotter
        if nic_mac is None:
8197 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
8198 5c44da6a Guido Trotter
8199 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
8200 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
8201 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8202 82187135 René Nussbaumer
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
8203 82187135 René Nussbaumer
8204 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
8205 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
8206 5c983ee5 Iustin Pop
                                     " modifying an existing nic",
8207 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8208 5c44da6a Guido Trotter
8209 24991749 Iustin Pop
    if nic_addremove > 1:
8210 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
8211 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
8212 24991749 Iustin Pop
8213 1a5c7281 Guido Trotter
  def ExpandNames(self):
8214 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
8215 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
8216 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8217 74409b12 Iustin Pop
8218 74409b12 Iustin Pop
  def DeclareLocks(self, level):
8219 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
8220 74409b12 Iustin Pop
      self._LockInstancesNodes()
8221 e29e9550 Iustin Pop
      if self.op.disk_template and self.op.remote_node:
8222 e29e9550 Iustin Pop
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8223 e29e9550 Iustin Pop
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
8224 a8083063 Iustin Pop
8225 a8083063 Iustin Pop
  def BuildHooksEnv(self):
8226 a8083063 Iustin Pop
    """Build hooks env.
8227 a8083063 Iustin Pop

8228 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
8229 a8083063 Iustin Pop

8230 a8083063 Iustin Pop
    """
8231 396e1b78 Michael Hanselmann
    args = dict()
8232 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
8233 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
8234 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
8235 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
8236 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
8237 d8dcf3c9 Guido Trotter
    # information at all.
8238 d8dcf3c9 Guido Trotter
    if self.op.nics:
8239 d8dcf3c9 Guido Trotter
      args['nics'] = []
8240 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
8241 62f0dd02 Guido Trotter
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
8242 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
8243 d8dcf3c9 Guido Trotter
        if idx in nic_override:
8244 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
8245 d8dcf3c9 Guido Trotter
        else:
8246 d8dcf3c9 Guido Trotter
          this_nic_override = {}
8247 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
8248 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
8249 d8dcf3c9 Guido Trotter
        else:
8250 d8dcf3c9 Guido Trotter
          ip = nic.ip
8251 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
8252 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
8253 d8dcf3c9 Guido Trotter
        else:
8254 d8dcf3c9 Guido Trotter
          mac = nic.mac
8255 62f0dd02 Guido Trotter
        if idx in self.nic_pnew:
8256 62f0dd02 Guido Trotter
          nicparams = self.nic_pnew[idx]
8257 62f0dd02 Guido Trotter
        else:
8258 62f0dd02 Guido Trotter
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
8259 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
8260 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
8261 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
8262 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
8263 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
8264 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
8265 62f0dd02 Guido Trotter
        nicparams = self.nic_pnew[constants.DDM_ADD]
8266 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
8267 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
8268 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
8269 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
8270 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
8271 d8dcf3c9 Guido Trotter
8272 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
8273 e29e9550 Iustin Pop
    if self.op.disk_template:
8274 e29e9550 Iustin Pop
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
8275 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
8276 a8083063 Iustin Pop
    return env, nl, nl
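  # A minimal sketch (hypothetical values, not from a real cluster) of the
  # override dict built above; each entry in args['nics'] is an
  # (ip, mac, mode, link) tuple:
  #
  #   args = {
  #     'memory': 2048,
  #     'vcpus': 2,
  #     'nics': [('198.51.100.10', 'aa:00:00:4c:25:e7', 'bridged', 'xen-br0')],
  #   }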
8277 a8083063 Iustin Pop
8278 7e950d31 Iustin Pop
  @staticmethod
8279 7e950d31 Iustin Pop
  def _GetUpdatedParams(old_params, update_dict,
8280 0329617a Guido Trotter
                        default_values, parameter_types):
8281 0329617a Guido Trotter
    """Return the new params dict for the given params.
8282 0329617a Guido Trotter

8283 0329617a Guido Trotter
    @type old_params: dict
8284 f2fd87d7 Iustin Pop
    @param old_params: old parameters
8285 0329617a Guido Trotter
    @type update_dict: dict
8286 f2fd87d7 Iustin Pop
    @param update_dict: dict containing new parameter values,
8287 f2fd87d7 Iustin Pop
                        or constants.VALUE_DEFAULT to reset the
8288 f2fd87d7 Iustin Pop
                        parameter to its default value
8289 0329617a Guido Trotter
    @type default_values: dict
8290 0329617a Guido Trotter
    @param default_values: default values for the filled parameters
8291 0329617a Guido Trotter
    @type parameter_types: dict
8292 0329617a Guido Trotter
    @param parameter_types: dict mapping target dict keys to types
8293 0329617a Guido Trotter
                            in constants.ENFORCEABLE_TYPES
8294 0329617a Guido Trotter
    @rtype: (dict, dict)
8295 0329617a Guido Trotter
    @return: (new_parameters, filled_parameters)
8296 0329617a Guido Trotter

8297 0329617a Guido Trotter
    """
8298 0329617a Guido Trotter
    params_copy = copy.deepcopy(old_params)
8299 0329617a Guido Trotter
    for key, val in update_dict.iteritems():
8300 0329617a Guido Trotter
      if val == constants.VALUE_DEFAULT:
8301 0329617a Guido Trotter
        try:
8302 0329617a Guido Trotter
          del params_copy[key]
8303 0329617a Guido Trotter
        except KeyError:
8304 0329617a Guido Trotter
          pass
8305 0329617a Guido Trotter
      else:
8306 0329617a Guido Trotter
        params_copy[key] = val
8307 0329617a Guido Trotter
    utils.ForceDictType(params_copy, parameter_types)
8308 0329617a Guido Trotter
    params_filled = objects.FillDict(default_values, params_copy)
8309 0329617a Guido Trotter
    return (params_copy, params_filled)
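  # A small worked example of the merge above (hypothetical values, passing
  # the backend parameter constants): VALUE_DEFAULT drops the key from the
  # instance-level dict, and the filled dict re-inherits it from the defaults:
  #
  #   old    = {constants.BE_MEMORY: 512}
  #   update = {constants.BE_MEMORY: constants.VALUE_DEFAULT,
  #             constants.BE_VCPUS: 4}
  #   new, filled = self._GetUpdatedParams(old, update,
  #                                        {constants.BE_MEMORY: 128,
  #                                         constants.BE_VCPUS: 1},
  #                                        constants.BES_PARAMETER_TYPES)
  #   # new == {BE_VCPUS: 4}; filled == {BE_MEMORY: 128, BE_VCPUS: 4}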
8310 0329617a Guido Trotter
8311 a8083063 Iustin Pop
  def CheckPrereq(self):
8312 a8083063 Iustin Pop
    """Check prerequisites.
8313 a8083063 Iustin Pop

8314 a8083063 Iustin Pop
    This checks the requested parameters against the instance and cluster.
8315 a8083063 Iustin Pop

8316 a8083063 Iustin Pop
    """
8317 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
8318 a8083063 Iustin Pop
8319 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
8320 31a853d2 Iustin Pop
8321 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8322 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
8323 1a5c7281 Guido Trotter
    assert self.instance is not None, \
8324 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
8325 6b12959c Iustin Pop
    pnode = instance.primary_node
8326 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
8327 74409b12 Iustin Pop
8328 e29e9550 Iustin Pop
    if self.op.disk_template:
8329 e29e9550 Iustin Pop
      if instance.disk_template == self.op.disk_template:
8330 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Instance already has disk template %s" %
8331 e29e9550 Iustin Pop
                                   instance.disk_template, errors.ECODE_INVAL)
8332 e29e9550 Iustin Pop
8333 e29e9550 Iustin Pop
      if (instance.disk_template,
8334 e29e9550 Iustin Pop
          self.op.disk_template) not in self._DISK_CONVERSIONS:
8335 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Unsupported disk template conversion from"
8336 e29e9550 Iustin Pop
                                   " %s to %s" % (instance.disk_template,
8337 e29e9550 Iustin Pop
                                                  self.op.disk_template),
8338 e29e9550 Iustin Pop
                                   errors.ECODE_INVAL)
8339 e29e9550 Iustin Pop
      if self.op.disk_template in constants.DTS_NET_MIRROR:
8340 e29e9550 Iustin Pop
        _CheckNodeOnline(self, self.op.remote_node)
8341 e29e9550 Iustin Pop
        _CheckNodeNotDrained(self, self.op.remote_node)
8342 e29e9550 Iustin Pop
        disks = [{"size": d.size} for d in instance.disks]
8343 e29e9550 Iustin Pop
        required = _ComputeDiskSize(self.op.disk_template, disks)
8344 e29e9550 Iustin Pop
        _CheckNodesFreeDisk(self, [self.op.remote_node], required)
8345 e29e9550 Iustin Pop
        _CheckInstanceDown(self, instance, "cannot change disk template")
8346 e29e9550 Iustin Pop
8347 338e51e8 Iustin Pop
    # hvparams processing
8348 74409b12 Iustin Pop
    if self.op.hvparams:
8349 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
8350 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
8351 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
8352 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
8353 74409b12 Iustin Pop
      # local check
8354 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
8355 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
8356 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
8357 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
8358 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
8359 338e51e8 Iustin Pop
    else:
8360 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
8361 338e51e8 Iustin Pop
8362 338e51e8 Iustin Pop
    # beparams processing
8363 338e51e8 Iustin Pop
    if self.op.beparams:
8364 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
8365 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
8366 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
8367 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
8368 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
8369 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
8370 338e51e8 Iustin Pop
    else:
8371 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
8372 74409b12 Iustin Pop
8373 cfefe007 Guido Trotter
    self.warn = []
8374 647a5d80 Iustin Pop
8375 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
8376 647a5d80 Iustin Pop
      mem_check_list = [pnode]
8377 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
8378 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
8379 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
8380 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
8381 72737a7f Iustin Pop
                                                  instance.hypervisor)
8382 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
8383 72737a7f Iustin Pop
                                         instance.hypervisor)
8384 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
8385 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
8386 070e998b Iustin Pop
      if msg:
8387 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
8388 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
8389 070e998b Iustin Pop
                         (pnode,  msg))
8390 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
8391 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
8392 070e998b Iustin Pop
                         " free memory information" % pnode)
8393 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
8394 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
8395 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
8396 cfefe007 Guido Trotter
      else:
8397 7ad1af4a Iustin Pop
        if instance_info.payload:
8398 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
8399 cfefe007 Guido Trotter
        else:
8400 cfefe007 Guido Trotter
          # Assume instance not running
8401 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
8402 cfefe007 Guido Trotter
          # and we have no other way to check)
8403 cfefe007 Guido Trotter
          current_mem = 0
8404 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
8405 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
8406 cfefe007 Guido Trotter
        if miss_mem > 0:
8407 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
8408 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
8409 5c983ee5 Iustin Pop
                                     " missing on its primary node" % miss_mem,
8410 5c983ee5 Iustin Pop
                                     errors.ECODE_NORES)
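      # Worked example for the memory check above, with hypothetical numbers:
      # requesting memory=4096 for an instance currently using 1024 on a
      # primary node reporting 2048 MB free gives
      #   miss_mem = 4096 - 1024 - 2048 = 1024
      # which is > 0, so the resize is refused unless the operation is forced.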
8411 cfefe007 Guido Trotter
8412 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
8413 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
8414 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
8415 ea33068f Iustin Pop
            continue
8416 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
8417 070e998b Iustin Pop
          if msg:
8418 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
8419 070e998b Iustin Pop
                             (node, msg))
8420 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
8421 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
8422 070e998b Iustin Pop
                             " memory information" % node)
8423 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
8424 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
8425 647a5d80 Iustin Pop
                             " secondary node %s" % node)
8426 5bc84f33 Alexander Schreiber
8427 24991749 Iustin Pop
    # NIC processing
8428 cd098c41 Guido Trotter
    self.nic_pnew = {}
8429 cd098c41 Guido Trotter
    self.nic_pinst = {}
8430 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8431 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8432 24991749 Iustin Pop
        if not instance.nics:
8433 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
8434 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8435 24991749 Iustin Pop
        continue
8436 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
8437 24991749 Iustin Pop
        # an existing nic
8438 21bcb9aa Michael Hanselmann
        if not instance.nics:
8439 21bcb9aa Michael Hanselmann
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
8440 21bcb9aa Michael Hanselmann
                                     " no NICs" % nic_op,
8441 21bcb9aa Michael Hanselmann
                                     errors.ECODE_INVAL)
8442 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
8443 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
8444 24991749 Iustin Pop
                                     " are 0 to %d" %
8445 21bcb9aa Michael Hanselmann
                                     (nic_op, len(instance.nics) - 1),
8446 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8447 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
8448 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
8449 cd098c41 Guido Trotter
      else:
8450 cd098c41 Guido Trotter
        old_nic_params = {}
8451 cd098c41 Guido Trotter
        old_nic_ip = None
8452 cd098c41 Guido Trotter
8453 cd098c41 Guido Trotter
      update_params_dict = dict([(key, nic_dict[key])
8454 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
8455 cd098c41 Guido Trotter
                                 if key in nic_dict])
8456 cd098c41 Guido Trotter
8457 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
8458 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
8459 cd098c41 Guido Trotter
8460 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
8461 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
8462 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
8463 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
8464 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
8465 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
8466 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
8467 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
8468 cd098c41 Guido Trotter
8469 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
8470 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
8471 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
8472 35c0c8da Iustin Pop
        if msg:
8473 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
8474 24991749 Iustin Pop
          if self.force:
8475 24991749 Iustin Pop
            self.warn.append(msg)
8476 24991749 Iustin Pop
          else:
8477 5c983ee5 Iustin Pop
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
8478 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
8479 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
8480 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
8481 cd098c41 Guido Trotter
        else:
8482 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
8483 cd098c41 Guido Trotter
        if nic_ip is None:
8484 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
8485 5c983ee5 Iustin Pop
                                     ' on a routed nic', errors.ECODE_INVAL)
8486 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
8487 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
8488 5c44da6a Guido Trotter
        if nic_mac is None:
8489 5c983ee5 Iustin Pop
          raise errors.OpPrereqError('Cannot set the nic mac to None',
8490 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8491 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8492 5c44da6a Guido Trotter
          # otherwise generate the mac
8493 36b66e6e Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
8494 5c44da6a Guido Trotter
        else:
8495 5c44da6a Guido Trotter
          # or validate/reserve the current one
8496 36b66e6e Guido Trotter
          try:
8497 36b66e6e Guido Trotter
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
8498 36b66e6e Guido Trotter
          except errors.ReservationError:
8499 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
8500 5c983ee5 Iustin Pop
                                       " in cluster" % nic_mac,
8501 5c983ee5 Iustin Pop
                                       errors.ECODE_NOTUNIQUE)
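    # A short sketch (hypothetical values) of what the loop above records for
    # a "switch NIC 0 to routed mode" request, assuming the NIC had no
    # instance-level nicparams and the cluster defaults are
    # {mode: 'bridged', link: 'xen-br0'}:
    #
    #   nic_dict           == {'mode': 'routed', 'ip': '198.51.100.5'}
    #   update_params_dict == {'mode': 'routed'}
    #   self.nic_pinst[0]  == {'mode': 'routed'}
    #   self.nic_pnew[0]   == {'mode': 'routed', 'link': 'xen-br0'}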
8502 24991749 Iustin Pop
8503 24991749 Iustin Pop
    # DISK processing
8504 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
8505 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
8506 5c983ee5 Iustin Pop
                                 " diskless instances",
8507 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
8508 1122eb25 Iustin Pop
    for disk_op, _ in self.op.disks:
8509 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8510 24991749 Iustin Pop
        if len(instance.disks) == 1:
8511 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
8512 31624382 Iustin Pop
                                     " an instance", errors.ECODE_INVAL)
8513 31624382 Iustin Pop
        _CheckInstanceDown(self, instance, "cannot remove disks")
8514 24991749 Iustin Pop
8515 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
8516 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
8517 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
8518 5c983ee5 Iustin Pop
                                   " add more" % constants.MAX_DISKS,
8519 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
8520 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
8521 24991749 Iustin Pop
        # an existing disk
8522 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
8523 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
8524 24991749 Iustin Pop
                                     " are 0 to %d" %
8525 5c983ee5 Iustin Pop
                                     (disk_op, len(instance.disks) - 1),
8526 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8527 24991749 Iustin Pop
8528 96b39bcc Iustin Pop
    # OS change
8529 96b39bcc Iustin Pop
    if self.op.os_name and not self.op.force:
8530 96b39bcc Iustin Pop
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
8531 96b39bcc Iustin Pop
                      self.op.force_variant)
8532 96b39bcc Iustin Pop
8533 a8083063 Iustin Pop
    return
8534 a8083063 Iustin Pop
8535 e29e9550 Iustin Pop
  def _ConvertPlainToDrbd(self, feedback_fn):
8536 e29e9550 Iustin Pop
    """Converts an instance from plain to drbd.
8537 e29e9550 Iustin Pop

8538 e29e9550 Iustin Pop
    """
8539 e29e9550 Iustin Pop
    feedback_fn("Converting template to drbd")
8540 e29e9550 Iustin Pop
    instance = self.instance
8541 e29e9550 Iustin Pop
    pnode = instance.primary_node
8542 e29e9550 Iustin Pop
    snode = self.op.remote_node
8543 e29e9550 Iustin Pop
8544 e29e9550 Iustin Pop
    # create a fake disk info for _GenerateDiskTemplate
8545 e29e9550 Iustin Pop
    disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
8546 e29e9550 Iustin Pop
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
8547 e29e9550 Iustin Pop
                                      instance.name, pnode, [snode],
8548 e29e9550 Iustin Pop
                                      disk_info, None, None, 0)
8549 e29e9550 Iustin Pop
    info = _GetInstanceInfoText(instance)
8550 e29e9550 Iustin Pop
    feedback_fn("Creating aditional volumes...")
8551 e29e9550 Iustin Pop
    # first, create the missing data and meta devices
8552 e29e9550 Iustin Pop
    for disk in new_disks:
8553 e29e9550 Iustin Pop
      # unfortunately this is... not too nice
8554 e29e9550 Iustin Pop
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
8555 e29e9550 Iustin Pop
                            info, True)
8556 e29e9550 Iustin Pop
      for child in disk.children:
8557 e29e9550 Iustin Pop
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
8558 e29e9550 Iustin Pop
    # at this stage, all new LVs have been created, we can rename the
8559 e29e9550 Iustin Pop
    # old ones
8560 e29e9550 Iustin Pop
    feedback_fn("Renaming original volumes...")
8561 e29e9550 Iustin Pop
    rename_list = [(o, n.children[0].logical_id)
8562 e29e9550 Iustin Pop
                   for (o, n) in zip(instance.disks, new_disks)]
8563 e29e9550 Iustin Pop
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
8564 e29e9550 Iustin Pop
    result.Raise("Failed to rename original LVs")
8565 e29e9550 Iustin Pop
8566 e29e9550 Iustin Pop
    feedback_fn("Initializing DRBD devices...")
8567 e29e9550 Iustin Pop
    # all child devices are in place, we can now create the DRBD devices
8568 e29e9550 Iustin Pop
    for disk in new_disks:
8569 e29e9550 Iustin Pop
      for node in [pnode, snode]:
8570 e29e9550 Iustin Pop
        f_create = node == pnode
8571 e29e9550 Iustin Pop
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
8572 e29e9550 Iustin Pop
8573 e29e9550 Iustin Pop
    # at this point, the instance has been modified
8574 e29e9550 Iustin Pop
    instance.disk_template = constants.DT_DRBD8
8575 e29e9550 Iustin Pop
    instance.disks = new_disks
8576 e29e9550 Iustin Pop
    self.cfg.Update(instance, feedback_fn)
8577 e29e9550 Iustin Pop
8578 e29e9550 Iustin Pop
    # disks are created, waiting for sync
8579 e29e9550 Iustin Pop
    disk_abort = not _WaitForSync(self, instance)
8580 e29e9550 Iustin Pop
    if disk_abort:
8581 e29e9550 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
8582 e29e9550 Iustin Pop
                               " this instance, please cleanup manually")
8583 e29e9550 Iustin Pop
8584 2f414c48 Iustin Pop
  def _ConvertDrbdToPlain(self, feedback_fn):
8585 2f414c48 Iustin Pop
    """Converts an instance from drbd to plain.
8586 2f414c48 Iustin Pop

8587 2f414c48 Iustin Pop
    """
8588 2f414c48 Iustin Pop
    instance = self.instance
8589 2f414c48 Iustin Pop
    assert len(instance.secondary_nodes) == 1
8590 2f414c48 Iustin Pop
    pnode = instance.primary_node
8591 2f414c48 Iustin Pop
    snode = instance.secondary_nodes[0]
8592 2f414c48 Iustin Pop
    feedback_fn("Converting template to plain")
8593 2f414c48 Iustin Pop
8594 2f414c48 Iustin Pop
    old_disks = instance.disks
8595 2f414c48 Iustin Pop
    new_disks = [d.children[0] for d in old_disks]
8596 2f414c48 Iustin Pop
8597 2f414c48 Iustin Pop
    # copy over size and mode
8598 2f414c48 Iustin Pop
    for parent, child in zip(old_disks, new_disks):
8599 2f414c48 Iustin Pop
      child.size = parent.size
8600 2f414c48 Iustin Pop
      child.mode = parent.mode
8601 2f414c48 Iustin Pop
8602 2f414c48 Iustin Pop
    # update instance structure
8603 2f414c48 Iustin Pop
    instance.disks = new_disks
8604 2f414c48 Iustin Pop
    instance.disk_template = constants.DT_PLAIN
8605 2f414c48 Iustin Pop
    self.cfg.Update(instance, feedback_fn)
8606 2f414c48 Iustin Pop
8607 2f414c48 Iustin Pop
    feedback_fn("Removing volumes on the secondary node...")
8608 2f414c48 Iustin Pop
    for disk in old_disks:
8609 2f414c48 Iustin Pop
      self.cfg.SetDiskID(disk, snode)
8610 2f414c48 Iustin Pop
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
8611 2f414c48 Iustin Pop
      if msg:
8612 2f414c48 Iustin Pop
        self.LogWarning("Could not remove block device %s on node %s,"
8613 2f414c48 Iustin Pop
                        " continuing anyway: %s", disk.iv_name, snode, msg)
8614 2f414c48 Iustin Pop
8615 2f414c48 Iustin Pop
    feedback_fn("Removing unneeded volumes on the primary node...")
8616 2f414c48 Iustin Pop
    for idx, disk in enumerate(old_disks):
8617 2f414c48 Iustin Pop
      meta = disk.children[1]
8618 2f414c48 Iustin Pop
      self.cfg.SetDiskID(meta, pnode)
8619 2f414c48 Iustin Pop
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
8620 2f414c48 Iustin Pop
      if msg:
8621 2f414c48 Iustin Pop
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
8622 2f414c48 Iustin Pop
                        " continuing anyway: %s", idx, pnode, msg)
8623 2f414c48 Iustin Pop
8624 2f414c48 Iustin Pop
8625 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8626 a8083063 Iustin Pop
    """Modifies an instance.
8627 a8083063 Iustin Pop

8628 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
8629 24991749 Iustin Pop

8630 a8083063 Iustin Pop
    """
8631 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
8632 cfefe007 Guido Trotter
    # feedback_fn there.
8633 cfefe007 Guido Trotter
    for warn in self.warn:
8634 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
8635 cfefe007 Guido Trotter
8636 a8083063 Iustin Pop
    result = []
8637 a8083063 Iustin Pop
    instance = self.instance
8638 24991749 Iustin Pop
    # disk changes
8639 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
8640 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8641 24991749 Iustin Pop
        # remove the last disk
8642 24991749 Iustin Pop
        device = instance.disks.pop()
8643 24991749 Iustin Pop
        device_idx = len(instance.disks)
8644 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
8645 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
8646 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
8647 e1bc0878 Iustin Pop
          if msg:
8648 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
8649 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
8650 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
8651 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
8652 24991749 Iustin Pop
        # add a new disk
8653 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
8654 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
8655 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
8656 24991749 Iustin Pop
        else:
8657 24991749 Iustin Pop
          file_driver = file_path = None
8658 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
8659 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
8660 24991749 Iustin Pop
                                         instance.disk_template,
8661 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
8662 24991749 Iustin Pop
                                         instance.secondary_nodes,
8663 24991749 Iustin Pop
                                         [disk_dict],
8664 24991749 Iustin Pop
                                         file_path,
8665 24991749 Iustin Pop
                                         file_driver,
8666 24991749 Iustin Pop
                                         disk_idx_base)[0]
8667 24991749 Iustin Pop
        instance.disks.append(new_disk)
8668 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
8669 24991749 Iustin Pop
8670 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
8671 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
8672 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
8673 24991749 Iustin Pop
        #HARDCODE
8674 428958aa Iustin Pop
        for node in instance.all_nodes:
8675 428958aa Iustin Pop
          f_create = node == instance.primary_node
8676 796cab27 Iustin Pop
          try:
8677 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
8678 428958aa Iustin Pop
                            f_create, info, f_create)
8679 1492cca7 Iustin Pop
          except errors.OpExecError, err:
8680 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
8681 428958aa Iustin Pop
                            " node %s: %s",
8682 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
8683 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
8684 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
8685 24991749 Iustin Pop
      else:
8686 24991749 Iustin Pop
        # change a given disk
8687 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
8688 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
8689 e29e9550 Iustin Pop
8690 e29e9550 Iustin Pop
    if self.op.disk_template:
8691 e29e9550 Iustin Pop
      r_shut = _ShutdownInstanceDisks(self, instance)
8692 e29e9550 Iustin Pop
      if not r_shut:
8693 e29e9550 Iustin Pop
        raise errors.OpExecError("Cannot shutdow instance disks, unable to"
8694 e29e9550 Iustin Pop
                                 " proceed with disk template conversion")
8695 e29e9550 Iustin Pop
      mode = (instance.disk_template, self.op.disk_template)
8696 e29e9550 Iustin Pop
      try:
8697 e29e9550 Iustin Pop
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
8698 e29e9550 Iustin Pop
      except:
8699 e29e9550 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
8700 e29e9550 Iustin Pop
        raise
8701 e29e9550 Iustin Pop
      result.append(("disk_template", self.op.disk_template))
8702 e29e9550 Iustin Pop
8703 24991749 Iustin Pop
    # NIC changes
8704 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8705 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8706 24991749 Iustin Pop
        # remove the last nic
8707 24991749 Iustin Pop
        del instance.nics[-1]
8708 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
8709 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
8710 5c44da6a Guido Trotter
        # mac and bridge should be set, by now
8711 5c44da6a Guido Trotter
        mac = nic_dict['mac']
8712 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
8713 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
8714 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
8715 24991749 Iustin Pop
        instance.nics.append(new_nic)
8716 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
8717 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
8718 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
8719 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
8720 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
8721 cd098c41 Guido Trotter
                       )))
8722 24991749 Iustin Pop
      else:
8723 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
8724 24991749 Iustin Pop
          if key in nic_dict:
8725 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
8726 beabf067 Guido Trotter
        if nic_op in self.nic_pinst:
8727 beabf067 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
8728 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
8729 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
8730 24991749 Iustin Pop
8731 24991749 Iustin Pop
    # hvparams changes
8732 74409b12 Iustin Pop
    if self.op.hvparams:
8733 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
8734 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
8735 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
8736 24991749 Iustin Pop
8737 24991749 Iustin Pop
    # beparams changes
8738 338e51e8 Iustin Pop
    if self.op.beparams:
8739 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
8740 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
8741 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
8742 a8083063 Iustin Pop
8743 96b39bcc Iustin Pop
    # OS change
8744 96b39bcc Iustin Pop
    if self.op.os_name:
8745 96b39bcc Iustin Pop
      instance.os = self.op.os_name
8746 96b39bcc Iustin Pop
8747 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
8748 a8083063 Iustin Pop
8749 a8083063 Iustin Pop
    return result
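  # Example of the change list returned above (hypothetical values); each
  # entry is a (parameter, new value) pair describing what was applied:
  #
  #   [("disk/1", "add:size=10240,mode=rw"),
  #    ("nic.ip/0", "198.51.100.5"),
  #    ("be/memory", 2048)]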
8750 a8083063 Iustin Pop
8751 e29e9550 Iustin Pop
  _DISK_CONVERSIONS = {
8752 e29e9550 Iustin Pop
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
8753 2f414c48 Iustin Pop
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
8754 e29e9550 Iustin Pop
    }
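  # Note: this dict is built in the class body, so its values are the plain
  # functions defined above (not bound methods), which is why Exec dispatches
  # with an explicit instance argument:
  #
  #   self._DISK_CONVERSIONS[(constants.DT_PLAIN, constants.DT_DRBD8)](
  #       self, feedback_fn)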
8755 a8083063 Iustin Pop
8756 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
8757 a8083063 Iustin Pop
  """Query the exports list
8758 a8083063 Iustin Pop

8759 a8083063 Iustin Pop
  """
8760 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
8761 21a15682 Guido Trotter
  REQ_BGL = False
8762 21a15682 Guido Trotter
8763 21a15682 Guido Trotter
  def ExpandNames(self):
8764 21a15682 Guido Trotter
    self.needed_locks = {}
8765 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
8766 21a15682 Guido Trotter
    if not self.op.nodes:
8767 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8768 21a15682 Guido Trotter
    else:
8769 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
8770 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
8771 a8083063 Iustin Pop
8772 a8083063 Iustin Pop
  def CheckPrereq(self):
8773 21a15682 Guido Trotter
    """Check prerequisites.
8774 a8083063 Iustin Pop

8775 a8083063 Iustin Pop
    """
8776 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
8777 a8083063 Iustin Pop
8778 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8779 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
8780 a8083063 Iustin Pop

8781 e4376078 Iustin Pop
    @rtype: dict
8782 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
8783 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
8784 e4376078 Iustin Pop
        that node.
8785 a8083063 Iustin Pop

8786 a8083063 Iustin Pop
    """
8787 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
8788 b04285f2 Guido Trotter
    result = {}
8789 b04285f2 Guido Trotter
    for node in rpcresult:
8790 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
8791 b04285f2 Guido Trotter
        result[node] = False
8792 b04285f2 Guido Trotter
      else:
8793 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
8794 b04285f2 Guido Trotter
8795 b04285f2 Guido Trotter
    return result
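  # Example return value (hypothetical names): nodes that answered map to
  # their export list, nodes whose RPC failed map to False:
  #
  #   {'node1.example.com': ['web1.example.com', 'backup-20100101'],
  #    'node2.example.com': False}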
8796 a8083063 Iustin Pop
8797 a8083063 Iustin Pop
8798 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
8799 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
8800 a8083063 Iustin Pop

8801 a8083063 Iustin Pop
  """
8802 a8083063 Iustin Pop
  HPATH = "instance-export"
8803 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
8804 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
8805 6657590e Guido Trotter
  REQ_BGL = False
8806 6657590e Guido Trotter
8807 17c3f802 Guido Trotter
  def CheckArguments(self):
8808 17c3f802 Guido Trotter
    """Check the arguments.
8809 17c3f802 Guido Trotter

8810 17c3f802 Guido Trotter
    """
8811 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
8812 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
8813 17c3f802 Guido Trotter
8814 6657590e Guido Trotter
  def ExpandNames(self):
8815 6657590e Guido Trotter
    self._ExpandAndLockInstance()
8816 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
8817 6657590e Guido Trotter
    #
8818 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
8819 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
8820 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
8821 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
8822 6657590e Guido Trotter
    #    then one to remove, after
8823 5bbd3f7f Michael Hanselmann
    #  - removing the removal operation altogether
8824 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8825 6657590e Guido Trotter
8826 6657590e Guido Trotter
  def DeclareLocks(self, level):
8827 6657590e Guido Trotter
    """Last minute lock declaration."""
8828 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
8829 a8083063 Iustin Pop
8830 a8083063 Iustin Pop
  def BuildHooksEnv(self):
8831 a8083063 Iustin Pop
    """Build hooks env.
8832 a8083063 Iustin Pop

8833 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
8834 a8083063 Iustin Pop

8835 a8083063 Iustin Pop
    """
8836 a8083063 Iustin Pop
    env = {
8837 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
8838 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
8839 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
8840 a8083063 Iustin Pop
      }
8841 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8842 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
8843 a8083063 Iustin Pop
          self.op.target_node]
8844 a8083063 Iustin Pop
    return env, nl, nl
8845 a8083063 Iustin Pop
8846 a8083063 Iustin Pop
  def CheckPrereq(self):
8847 a8083063 Iustin Pop
    """Check prerequisites.
8848 a8083063 Iustin Pop

8849 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
8850 a8083063 Iustin Pop

8851 a8083063 Iustin Pop
    """
8852 6657590e Guido Trotter
    instance_name = self.op.instance_name
8853 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
8854 6657590e Guido Trotter
    assert self.instance is not None, \
8855 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
8856 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
8857 a8083063 Iustin Pop
8858 cf26a87a Iustin Pop
    self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
8859 cf26a87a Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
8860 cf26a87a Iustin Pop
    assert self.dst_node is not None
8861 a8083063 Iustin Pop
8862 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
8863 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
8864 a8083063 Iustin Pop
8865 b6023d6c Manuel Franceschini
    # instance disk type verification
8866 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
8867 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
8868 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
8869 5c983ee5 Iustin Pop
                                   " file-based disks", errors.ECODE_INVAL)
8870 b6023d6c Manuel Franceschini
8871 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8872 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
8873 a8083063 Iustin Pop

8874 a8083063 Iustin Pop
    """
8875 a8083063 Iustin Pop
    instance = self.instance
8876 a8083063 Iustin Pop
    dst_node = self.dst_node
8877 a8083063 Iustin Pop
    src_node = instance.primary_node
8878 37972df0 Michael Hanselmann
8879 a8083063 Iustin Pop
    if self.op.shutdown:
8880 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
8881 37972df0 Michael Hanselmann
      feedback_fn("Shutting down instance %s" % instance.name)
8882 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(src_node, instance,
8883 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
8884 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
8885 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
8886 a8083063 Iustin Pop
8887 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
8888 a8083063 Iustin Pop
8889 a8083063 Iustin Pop
    snap_disks = []
8890 a8083063 Iustin Pop
8891 998c712c Iustin Pop
    # set the disks ID correctly since call_instance_start needs the
8892 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
8893 998c712c Iustin Pop
    for disk in instance.disks:
8894 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
8895 998c712c Iustin Pop
8896 3e53a60b Michael Hanselmann
    activate_disks = (not instance.admin_up)
8897 3e53a60b Michael Hanselmann
8898 3e53a60b Michael Hanselmann
    if activate_disks:
8899 3e53a60b Michael Hanselmann
      # Activate the instance disks if we'exporting a stopped instance
8900 3e53a60b Michael Hanselmann
      feedback_fn("Activating disks for %s" % instance.name)
8901 3e53a60b Michael Hanselmann
      _StartInstanceDisks(self, instance, None)
8902 3e53a60b Michael Hanselmann
8903 a8083063 Iustin Pop
    try:
8904 3e53a60b Michael Hanselmann
      # per-disk results
8905 3e53a60b Michael Hanselmann
      dresults = []
8906 3e53a60b Michael Hanselmann
      try:
8907 3e53a60b Michael Hanselmann
        for idx, disk in enumerate(instance.disks):
8908 3e53a60b Michael Hanselmann
          feedback_fn("Creating a snapshot of disk/%s on node %s" %
8909 3e53a60b Michael Hanselmann
                      (idx, src_node))
8910 3e53a60b Michael Hanselmann
8911 3e53a60b Michael Hanselmann
          # result.payload will be a snapshot of an lvm leaf of the one we
8912 3e53a60b Michael Hanselmann
          # passed
8913 3e53a60b Michael Hanselmann
          result = self.rpc.call_blockdev_snapshot(src_node, disk)
8914 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8915 3e53a60b Michael Hanselmann
          if msg:
8916 3e53a60b Michael Hanselmann
            self.LogWarning("Could not snapshot disk/%s on node %s: %s",
8917 3e53a60b Michael Hanselmann
                            idx, src_node, msg)
8918 3e53a60b Michael Hanselmann
            snap_disks.append(False)
8919 3e53a60b Michael Hanselmann
          else:
8920 3e53a60b Michael Hanselmann
            disk_id = (vgname, result.payload)
8921 3e53a60b Michael Hanselmann
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
8922 3e53a60b Michael Hanselmann
                                   logical_id=disk_id, physical_id=disk_id,
8923 3e53a60b Michael Hanselmann
                                   iv_name=disk.iv_name)
8924 3e53a60b Michael Hanselmann
            snap_disks.append(new_dev)
8925 37972df0 Michael Hanselmann
8926 3e53a60b Michael Hanselmann
      finally:
8927 3e53a60b Michael Hanselmann
        if self.op.shutdown and instance.admin_up:
8928 3e53a60b Michael Hanselmann
          feedback_fn("Starting instance %s" % instance.name)
8929 3e53a60b Michael Hanselmann
          result = self.rpc.call_instance_start(src_node, instance, None, None)
8930 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8931 3e53a60b Michael Hanselmann
          if msg:
8932 3e53a60b Michael Hanselmann
            _ShutdownInstanceDisks(self, instance)
8933 3e53a60b Michael Hanselmann
            raise errors.OpExecError("Could not start instance: %s" % msg)
8934 3e53a60b Michael Hanselmann
8935 3e53a60b Michael Hanselmann
      # TODO: check for size
8936 3e53a60b Michael Hanselmann
8937 3e53a60b Michael Hanselmann
      cluster_name = self.cfg.GetClusterName()
8938 3e53a60b Michael Hanselmann
      for idx, dev in enumerate(snap_disks):
8939 3e53a60b Michael Hanselmann
        feedback_fn("Exporting snapshot %s from %s to %s" %
8940 3e53a60b Michael Hanselmann
                    (idx, src_node, dst_node.name))
8941 3e53a60b Michael Hanselmann
        if dev:
8942 4a0e011f Iustin Pop
          # FIXME: pass debug from opcode to backend
8943 3e53a60b Michael Hanselmann
          result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
8944 4a0e011f Iustin Pop
                                                 instance, cluster_name,
8945 dd713605 Iustin Pop
                                                 idx, self.op.debug_level)
8946 3e53a60b Michael Hanselmann
          msg = result.fail_msg
8947 3e53a60b Michael Hanselmann
          if msg:
8948 3e53a60b Michael Hanselmann
            self.LogWarning("Could not export disk/%s from node %s to"
8949 3e53a60b Michael Hanselmann
                            " node %s: %s", idx, src_node, dst_node.name, msg)
8950 3e53a60b Michael Hanselmann
            dresults.append(False)
8951 3e53a60b Michael Hanselmann
          else:
8952 3e53a60b Michael Hanselmann
            dresults.append(True)
8953 3e53a60b Michael Hanselmann
          msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
8954 3e53a60b Michael Hanselmann
          if msg:
8955 3e53a60b Michael Hanselmann
            self.LogWarning("Could not remove snapshot for disk/%d from node"
8956 3e53a60b Michael Hanselmann
                            " %s: %s", idx, src_node, msg)
8957 19d7f90a Guido Trotter
        else:
8958 084f05a5 Iustin Pop
          dresults.append(False)
8959 a8083063 Iustin Pop
8960 3e53a60b Michael Hanselmann
      feedback_fn("Finalizing export on %s" % dst_node.name)
8961 3e53a60b Michael Hanselmann
      result = self.rpc.call_finalize_export(dst_node.name, instance,
8962 3e53a60b Michael Hanselmann
                                             snap_disks)
8963 3e53a60b Michael Hanselmann
      fin_resu = True
8964 3e53a60b Michael Hanselmann
      msg = result.fail_msg
8965 3e53a60b Michael Hanselmann
      if msg:
8966 3e53a60b Michael Hanselmann
        self.LogWarning("Could not finalize export for instance %s"
8967 3e53a60b Michael Hanselmann
                        " on node %s: %s", instance.name, dst_node.name, msg)
8968 3e53a60b Michael Hanselmann
        fin_resu = False
8969 3e53a60b Michael Hanselmann
8970 3e53a60b Michael Hanselmann
    finally:
8971 3e53a60b Michael Hanselmann
      if activate_disks:
8972 3e53a60b Michael Hanselmann
        feedback_fn("Deactivating disks for %s" % instance.name)
8973 3e53a60b Michael Hanselmann
        _ShutdownInstanceDisks(self, instance)
8974 a8083063 Iustin Pop
8975 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
8976 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
8977 a8083063 Iustin Pop
8978 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal;
8979 a8083063 Iustin Pop
    # if we proceed, the backup would be removed because OpQueryExports
8980 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
8981 35fbcd11 Iustin Pop
    iname = instance.name
8982 a8083063 Iustin Pop
    if nodelist:
8983 37972df0 Michael Hanselmann
      feedback_fn("Removing old exports for instance %s" % iname)
8984 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
8985 a8083063 Iustin Pop
      for node in exportlist:
8986 4c4e4e1e Iustin Pop
        if exportlist[node].fail_msg:
8987 781de953 Iustin Pop
          continue
8988 35fbcd11 Iustin Pop
        if iname in exportlist[node].payload:
8989 4c4e4e1e Iustin Pop
          msg = self.rpc.call_export_remove(node, iname).fail_msg
8990 35fbcd11 Iustin Pop
          if msg:
8991 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
8992 35fbcd11 Iustin Pop
                            " on node %s: %s", iname, node, msg)
8993 084f05a5 Iustin Pop
    return fin_resu, dresults
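  # The return value is a (finalize_ok, per_disk_results) pair; e.g. the
  # hypothetical (True, [True, False]) means the export was finalized on the
  # target node, disk 0 was copied successfully and disk 1 failed.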
8994 5c947f38 Iustin Pop
8995 5c947f38 Iustin Pop
8996 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
8997 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
8998 9ac99fda Guido Trotter

8999 9ac99fda Guido Trotter
  """
9000 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
9001 3656b3af Guido Trotter
  REQ_BGL = False
9002 3656b3af Guido Trotter
9003 3656b3af Guido Trotter
  def ExpandNames(self):
9004 3656b3af Guido Trotter
    self.needed_locks = {}
9005 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
9006 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
9007 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
9008 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9009 9ac99fda Guido Trotter
9010 9ac99fda Guido Trotter
  def CheckPrereq(self):
9011 9ac99fda Guido Trotter
    """Check prerequisites.
9012 9ac99fda Guido Trotter
    """
9013 9ac99fda Guido Trotter
    pass
9014 9ac99fda Guido Trotter
9015 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
9016 9ac99fda Guido Trotter
    """Remove any export.
9017 9ac99fda Guido Trotter

9018 9ac99fda Guido Trotter
    """
9019 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
9020 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
9021 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
9022 9ac99fda Guido Trotter
    fqdn_warn = False
9023 9ac99fda Guido Trotter
    if not instance_name:
9024 9ac99fda Guido Trotter
      fqdn_warn = True
9025 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
9026 9ac99fda Guido Trotter
9027 1b7bfbb7 Iustin Pop
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
9028 1b7bfbb7 Iustin Pop
    exportlist = self.rpc.call_export_list(locked_nodes)
9029 9ac99fda Guido Trotter
    found = False
9030 9ac99fda Guido Trotter
    for node in exportlist:
9031 4c4e4e1e Iustin Pop
      msg = exportlist[node].fail_msg
9032 1b7bfbb7 Iustin Pop
      if msg:
9033 1b7bfbb7 Iustin Pop
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
9034 781de953 Iustin Pop
        continue
9035 1b7bfbb7 Iustin Pop
      if instance_name in exportlist[node].payload:
9036 9ac99fda Guido Trotter
        found = True
9037 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
9038 4c4e4e1e Iustin Pop
        msg = result.fail_msg
9039 35fbcd11 Iustin Pop
        if msg:
9040 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
9041 35fbcd11 Iustin Pop
                        " on node %s: %s", instance_name, node, msg)
9042 9ac99fda Guido Trotter
9043 9ac99fda Guido Trotter
    if fqdn_warn and not found:
9044 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
9045 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
9046 9ac99fda Guido Trotter
                  " Domain Name.")
9047 9ac99fda Guido Trotter
9048 9ac99fda Guido Trotter
9049 fe267188 Iustin Pop
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
9050 5c947f38 Iustin Pop
  """Generic tags LU.
9051 5c947f38 Iustin Pop

9052 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
9053 5c947f38 Iustin Pop

9054 5c947f38 Iustin Pop
  """
9055 5c947f38 Iustin Pop
9056 8646adce Guido Trotter
  def ExpandNames(self):
9057 8646adce Guido Trotter
    self.needed_locks = {}
9058 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
9059 cf26a87a Iustin Pop
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
9060 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
9061 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
9062 cf26a87a Iustin Pop
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
9063 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
9064 8646adce Guido Trotter
9065 8646adce Guido Trotter
  def CheckPrereq(self):
9066 8646adce Guido Trotter
    """Check prerequisites.
9067 8646adce Guido Trotter

9068 8646adce Guido Trotter
    """
9069 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
9070 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
9071 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
9072 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
9073 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
9074 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
9075 5c947f38 Iustin Pop
    else:
9076 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
9077 5c983ee5 Iustin Pop
                                 str(self.op.kind), errors.ECODE_INVAL)
9078 5c947f38 Iustin Pop
9079 5c947f38 Iustin Pop
9080 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
9081 5c947f38 Iustin Pop
  """Returns the tags of a given object.
9082 5c947f38 Iustin Pop

9083 5c947f38 Iustin Pop
  """
9084 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
9085 8646adce Guido Trotter
  REQ_BGL = False
9086 5c947f38 Iustin Pop
9087 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
9088 5c947f38 Iustin Pop
    """Returns the tag list.
9089 5c947f38 Iustin Pop

9090 5c947f38 Iustin Pop
    """
9091 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
9092 5c947f38 Iustin Pop
9093 5c947f38 Iustin Pop
9094 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
9095 73415719 Iustin Pop
  """Searches the tags for a given pattern.
9096 73415719 Iustin Pop

9097 73415719 Iustin Pop
  """
9098 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
9099 8646adce Guido Trotter
  REQ_BGL = False
9100 8646adce Guido Trotter
9101 8646adce Guido Trotter
  def ExpandNames(self):
9102 8646adce Guido Trotter
    self.needed_locks = {}
9103 73415719 Iustin Pop
9104 73415719 Iustin Pop
  def CheckPrereq(self):
9105 73415719 Iustin Pop
    """Check prerequisites.
9106 73415719 Iustin Pop

9107 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
9108 73415719 Iustin Pop

9109 73415719 Iustin Pop
    """
9110 73415719 Iustin Pop
    try:
9111 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
9112 73415719 Iustin Pop
    except re.error, err:
9113 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
9114 5c983ee5 Iustin Pop
                                 (self.op.pattern, err), errors.ECODE_INVAL)
9115 73415719 Iustin Pop
9116 73415719 Iustin Pop
  def Exec(self, feedback_fn):
9117 73415719 Iustin Pop
    """Returns the tag list.
9118 73415719 Iustin Pop

9119 73415719 Iustin Pop
    """
9120 73415719 Iustin Pop
    cfg = self.cfg
9121 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
9122 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
9123 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
9124 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
9125 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
9126 73415719 Iustin Pop
    results = []
9127 73415719 Iustin Pop
    for path, target in tgts:
9128 73415719 Iustin Pop
      for tag in target.GetTags():
9129 73415719 Iustin Pop
        if self.re.search(tag):
9130 73415719 Iustin Pop
          results.append((path, tag))
9131 73415719 Iustin Pop
    return results
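  # Illustrative example (not part of the original code; names are
  # hypothetical): with an instance "inst1.example.com" tagged "web" and a
  # cluster tag "webfarm", a search for "web" would return something like
  #
  #   [("/cluster", "webfarm"), ("/instances/inst1.example.com", "web")]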
9132 73415719 Iustin Pop
9133 73415719 Iustin Pop
9134 f27302fa Iustin Pop
class LUAddTags(TagsLU):
9135 5c947f38 Iustin Pop
  """Sets a tag on a given object.
9136 5c947f38 Iustin Pop

9137 5c947f38 Iustin Pop
  """
9138 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
9139 8646adce Guido Trotter
  REQ_BGL = False
9140 5c947f38 Iustin Pop
9141 5c947f38 Iustin Pop
  def CheckPrereq(self):
9142 5c947f38 Iustin Pop
    """Check prerequisites.
9143 5c947f38 Iustin Pop

9144 5c947f38 Iustin Pop
    This checks the type and length of each tag passed.
9145 5c947f38 Iustin Pop

9146 5c947f38 Iustin Pop
    """
9147 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
9148 f27302fa Iustin Pop
    for tag in self.op.tags:
9149 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
9150 5c947f38 Iustin Pop
9151 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
9152 5c947f38 Iustin Pop
    """Sets the tag.
9153 5c947f38 Iustin Pop

9154 5c947f38 Iustin Pop
    """
9155 5c947f38 Iustin Pop
    try:
9156 f27302fa Iustin Pop
      for tag in self.op.tags:
9157 f27302fa Iustin Pop
        self.target.AddTag(tag)
9158 5c947f38 Iustin Pop
    except errors.TagError, err:
9159 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
9160 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
9161 5c947f38 Iustin Pop
9162 5c947f38 Iustin Pop
9163 f27302fa Iustin Pop
class LUDelTags(TagsLU):
9164 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
9165 5c947f38 Iustin Pop

9166 5c947f38 Iustin Pop
  """
9167 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
9168 8646adce Guido Trotter
  REQ_BGL = False
9169 5c947f38 Iustin Pop
9170 5c947f38 Iustin Pop
  def CheckPrereq(self):
9171 5c947f38 Iustin Pop
    """Check prerequisites.
9172 5c947f38 Iustin Pop

9173 5c947f38 Iustin Pop
    This checks that we have the given tags.
9174 5c947f38 Iustin Pop

9175 5c947f38 Iustin Pop
    """
9176 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
9177 f27302fa Iustin Pop
    for tag in self.op.tags:
9178 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
9179 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
9180 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
9181 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
9182 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
9183 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
9184 f27302fa Iustin Pop
      diff_names.sort()
9185 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
9186 5c983ee5 Iustin Pop
                                 (",".join(diff_names)), errors.ECODE_NOENT)
9187 5c947f38 Iustin Pop
9188 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
9189 5c947f38 Iustin Pop
    """Remove the tag from the object.
9190 5c947f38 Iustin Pop

9191 5c947f38 Iustin Pop
    """
9192 f27302fa Iustin Pop
    for tag in self.op.tags:
9193 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
9194 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
9195 06009e27 Iustin Pop
9196 0eed6e61 Guido Trotter
9197 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
9198 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
9199 06009e27 Iustin Pop

9200 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
9201 06009e27 Iustin Pop
  time.
9202 06009e27 Iustin Pop

9203 06009e27 Iustin Pop
  """
9204 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
9205 fbe9022f Guido Trotter
  REQ_BGL = False
9206 06009e27 Iustin Pop
9207 fbe9022f Guido Trotter
  def ExpandNames(self):
9208 fbe9022f Guido Trotter
    """Expand names and set required locks.
9209 06009e27 Iustin Pop

9210 fbe9022f Guido Trotter
    This expands the node list, if any.
9211 06009e27 Iustin Pop

9212 06009e27 Iustin Pop
    """
9213 fbe9022f Guido Trotter
    self.needed_locks = {}
9214 06009e27 Iustin Pop
    if self.op.on_nodes:
9215 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
9216 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
9217 fbe9022f Guido Trotter
      # more information.
9218 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
9219 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
9220 fbe9022f Guido Trotter
9221 fbe9022f Guido Trotter
  def CheckPrereq(self):
9222 fbe9022f Guido Trotter
    """Check prerequisites.
9223 fbe9022f Guido Trotter

9224 fbe9022f Guido Trotter
    """
9225 06009e27 Iustin Pop
9226 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
9227 06009e27 Iustin Pop
    """Do the actual sleep.
9228 06009e27 Iustin Pop

9229 06009e27 Iustin Pop
    """
9230 06009e27 Iustin Pop
    if self.op.on_master:
9231 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
9232 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
9233 06009e27 Iustin Pop
    if self.op.on_nodes:
9234 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
9235 06009e27 Iustin Pop
      for node, node_result in result.items():
9236 4c4e4e1e Iustin Pop
        node_result.Raise("Failure during rpc call to node %s" % node)
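  # Usage sketch (illustrative only): this LU is normally reached through its
  # opcode, roughly along the lines of
  #
  #   op = opcodes.OpTestDelay(duration=5.0, on_master=True, on_nodes=[])
  #
  # where the field names mirror _OP_REQP above; the exact opcode class name
  # is assumed here, not taken from this file.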
9237 d61df03e Iustin Pop
9238 d61df03e Iustin Pop
9239 d1c2dd75 Iustin Pop
class IAllocator(object):
9240 d1c2dd75 Iustin Pop
  """IAllocator framework.
9241 d61df03e Iustin Pop

9242 d1c2dd75 Iustin Pop
  An IAllocator instance has the following sets of attributes:
9243 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
9244 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
9245 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), which represent the
9246 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
9247 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
9248 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, result) for
9249 d1c2dd75 Iustin Pop
      easy usage
9250 d61df03e Iustin Pop

9251 d61df03e Iustin Pop
  """
9252 7260cfbe Iustin Pop
  # pylint: disable-msg=R0902
9253 7260cfbe Iustin Pop
  # lots of instance attributes
9254 29859cb7 Iustin Pop
  _ALLO_KEYS = [
9255 8d3f86a0 Iustin Pop
    "name", "mem_size", "disks", "disk_template",
9256 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
9257 d1c2dd75 Iustin Pop
    ]
9258 29859cb7 Iustin Pop
  _RELO_KEYS = [
9259 8d3f86a0 Iustin Pop
    "name", "relocate_from",
9260 29859cb7 Iustin Pop
    ]
9261 7f60a422 Iustin Pop
  _EVAC_KEYS = [
9262 7f60a422 Iustin Pop
    "evac_nodes",
9263 7f60a422 Iustin Pop
    ]
9264 d1c2dd75 Iustin Pop
9265 8d3f86a0 Iustin Pop
  def __init__(self, cfg, rpc, mode, **kwargs):
9266 923ddac0 Michael Hanselmann
    self.cfg = cfg
9267 923ddac0 Michael Hanselmann
    self.rpc = rpc
9268 d1c2dd75 Iustin Pop
    # init buffer variables
9269 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
9270 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
9271 29859cb7 Iustin Pop
    self.mode = mode
9272 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
9273 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
9274 a0add446 Iustin Pop
    self.hypervisor = None
9275 29859cb7 Iustin Pop
    self.relocate_from = None
9276 8d3f86a0 Iustin Pop
    self.name = None
9277 7f60a422 Iustin Pop
    self.evac_nodes = None
9278 27579978 Iustin Pop
    # computed fields
9279 27579978 Iustin Pop
    self.required_nodes = None
9280 d1c2dd75 Iustin Pop
    # init result fields
9281 680f0a89 Iustin Pop
    self.success = self.info = self.result = None
9282 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
9283 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
9284 9757cc90 Iustin Pop
      fn = self._AddNewInstance
9285 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
9286 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
9287 9757cc90 Iustin Pop
      fn = self._AddRelocateInstance
9288 7f60a422 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
9289 7f60a422 Iustin Pop
      keyset = self._EVAC_KEYS
9290 7f60a422 Iustin Pop
      fn = self._AddEvacuateNodes
9291 29859cb7 Iustin Pop
    else:
9292 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
9293 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
9294 d1c2dd75 Iustin Pop
    for key in kwargs:
9295 29859cb7 Iustin Pop
      if key not in keyset:
9296 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
9297 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
9298 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
9299 7f60a422 Iustin Pop
9300 29859cb7 Iustin Pop
    for key in keyset:
9301 d1c2dd75 Iustin Pop
      if key not in kwargs:
9302 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
9303 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
9304 9757cc90 Iustin Pop
    self._BuildInputData(fn)
9305 d1c2dd75 Iustin Pop
9306 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
9307 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
9308 d1c2dd75 Iustin Pop

9309 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
9310 d1c2dd75 Iustin Pop

9311 d1c2dd75 Iustin Pop
    """
9312 923ddac0 Michael Hanselmann
    cfg = self.cfg
9313 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
9314 d1c2dd75 Iustin Pop
    # cluster data
9315 d1c2dd75 Iustin Pop
    data = {
9316 77031881 Iustin Pop
      "version": constants.IALLOCATOR_VERSION,
9317 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
9318 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
9319 1325da74 Iustin Pop
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
9320 d1c2dd75 Iustin Pop
      # we don't have job IDs
9321 d61df03e Iustin Pop
      }
9322 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
9323 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
9324 6286519f Iustin Pop
9325 d1c2dd75 Iustin Pop
    # node data
9326 d1c2dd75 Iustin Pop
    node_results = {}
9327 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
9328 8cc7e742 Guido Trotter
9329 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
9330 a0add446 Iustin Pop
      hypervisor_name = self.hypervisor
9331 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
9332 a0add446 Iustin Pop
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
9333 7f60a422 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
9334 7f60a422 Iustin Pop
      hypervisor_name = cluster_info.enabled_hypervisors[0]
9335 8cc7e742 Guido Trotter
9336 923ddac0 Michael Hanselmann
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
9337 923ddac0 Michael Hanselmann
                                        hypervisor_name)
9338 923ddac0 Michael Hanselmann
    node_iinfo = \
9339 923ddac0 Michael Hanselmann
      self.rpc.call_all_instances_info(node_list,
9340 923ddac0 Michael Hanselmann
                                       cluster_info.enabled_hypervisors)
9341 1325da74 Iustin Pop
    for nname, nresult in node_data.items():
9342 1325da74 Iustin Pop
      # first fill in static (config-based) values
9343 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
9344 d1c2dd75 Iustin Pop
      pnr = {
9345 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
9346 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
9347 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
9348 fc0fe88c Iustin Pop
        "offline": ninfo.offline,
9349 0b2454b9 Iustin Pop
        "drained": ninfo.drained,
9350 1325da74 Iustin Pop
        "master_candidate": ninfo.master_candidate,
9351 d1c2dd75 Iustin Pop
        }
9352 1325da74 Iustin Pop
9353 0d853843 Iustin Pop
      if not (ninfo.offline or ninfo.drained):
9354 4c4e4e1e Iustin Pop
        nresult.Raise("Can't get data for node %s" % nname)
9355 4c4e4e1e Iustin Pop
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
9356 4c4e4e1e Iustin Pop
                                nname)
9357 070e998b Iustin Pop
        remote_info = nresult.payload
9358 b142ef15 Iustin Pop
9359 1325da74 Iustin Pop
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
9360 1325da74 Iustin Pop
                     'vg_size', 'vg_free', 'cpu_total']:
9361 1325da74 Iustin Pop
          if attr not in remote_info:
9362 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' didn't return attribute"
9363 1325da74 Iustin Pop
                                     " '%s'" % (nname, attr))
9364 070e998b Iustin Pop
          if not isinstance(remote_info[attr], int):
9365 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' returned invalid value"
9366 070e998b Iustin Pop
                                     " for '%s': %s" %
9367 070e998b Iustin Pop
                                     (nname, attr, remote_info[attr]))
9368 1325da74 Iustin Pop
        # compute memory used by primary instances
9369 1325da74 Iustin Pop
        i_p_mem = i_p_up_mem = 0
9370 1325da74 Iustin Pop
        for iinfo, beinfo in i_list:
9371 1325da74 Iustin Pop
          if iinfo.primary_node == nname:
9372 1325da74 Iustin Pop
            i_p_mem += beinfo[constants.BE_MEMORY]
9373 2fa74ef4 Iustin Pop
            if iinfo.name not in node_iinfo[nname].payload:
9374 1325da74 Iustin Pop
              i_used_mem = 0
9375 1325da74 Iustin Pop
            else:
9376 2fa74ef4 Iustin Pop
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
9377 1325da74 Iustin Pop
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
9378 1325da74 Iustin Pop
            remote_info['memory_free'] -= max(0, i_mem_diff)
9379 1325da74 Iustin Pop
9380 1325da74 Iustin Pop
            if iinfo.admin_up:
9381 1325da74 Iustin Pop
              i_p_up_mem += beinfo[constants.BE_MEMORY]
9382 1325da74 Iustin Pop
9383 1325da74 Iustin Pop
        # compute memory used by instances
9384 1325da74 Iustin Pop
        pnr_dyn = {
9385 1325da74 Iustin Pop
          "total_memory": remote_info['memory_total'],
9386 1325da74 Iustin Pop
          "reserved_memory": remote_info['memory_dom0'],
9387 1325da74 Iustin Pop
          "free_memory": remote_info['memory_free'],
9388 1325da74 Iustin Pop
          "total_disk": remote_info['vg_size'],
9389 1325da74 Iustin Pop
          "free_disk": remote_info['vg_free'],
9390 1325da74 Iustin Pop
          "total_cpus": remote_info['cpu_total'],
9391 1325da74 Iustin Pop
          "i_pri_memory": i_p_mem,
9392 1325da74 Iustin Pop
          "i_pri_up_memory": i_p_up_mem,
9393 1325da74 Iustin Pop
          }
9394 1325da74 Iustin Pop
        pnr.update(pnr_dyn)
9395 1325da74 Iustin Pop
9396 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
9397 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
9398 d1c2dd75 Iustin Pop
9399 d1c2dd75 Iustin Pop
    # instance data
9400 d1c2dd75 Iustin Pop
    instance_data = {}
9401 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
9402 a9fe7e8f Guido Trotter
      nic_data = []
9403 a9fe7e8f Guido Trotter
      for nic in iinfo.nics:
9404 a9fe7e8f Guido Trotter
        filled_params = objects.FillDict(
9405 a9fe7e8f Guido Trotter
            cluster_info.nicparams[constants.PP_DEFAULT],
9406 a9fe7e8f Guido Trotter
            nic.nicparams)
9407 a9fe7e8f Guido Trotter
        nic_dict = {"mac": nic.mac,
9408 a9fe7e8f Guido Trotter
                    "ip": nic.ip,
9409 a9fe7e8f Guido Trotter
                    "mode": filled_params[constants.NIC_MODE],
9410 a9fe7e8f Guido Trotter
                    "link": filled_params[constants.NIC_LINK],
9411 a9fe7e8f Guido Trotter
                   }
9412 a9fe7e8f Guido Trotter
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
9413 a9fe7e8f Guido Trotter
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
9414 a9fe7e8f Guido Trotter
        nic_data.append(nic_dict)
9415 d1c2dd75 Iustin Pop
      pir = {
9416 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
9417 1325da74 Iustin Pop
        "admin_up": iinfo.admin_up,
9418 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
9419 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
9420 d1c2dd75 Iustin Pop
        "os": iinfo.os,
9421 1325da74 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
9422 d1c2dd75 Iustin Pop
        "nics": nic_data,
9423 1325da74 Iustin Pop
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
9424 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
9425 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
9426 d1c2dd75 Iustin Pop
        }
9427 88ae4f85 Iustin Pop
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
9428 88ae4f85 Iustin Pop
                                                 pir["disks"])
9429 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
9430 d61df03e Iustin Pop
9431 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
9432 d61df03e Iustin Pop
9433 d1c2dd75 Iustin Pop
    self.in_data = data
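    # For orientation (illustrative and heavily trimmed; node and instance
    # names are hypothetical), the structure built here looks roughly like:
    #
    #   {"version": constants.IALLOCATOR_VERSION,
    #    "cluster_name": "cluster.example.com",
    #    "cluster_tags": [],
    #    "enabled_hypervisors": ["xen-pvm"],
    #    "nodes": {"node1.example.com": {"total_memory": 4096, ...}},
    #    "instances": {"inst1.example.com": {"memory": 1024, ...}}}
    #
    # the "request" key is added later by _BuildInputData.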
9434 d61df03e Iustin Pop
9435 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
9436 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
9437 d61df03e Iustin Pop

9438 d1c2dd75 Iustin Pop
    This, in combination with _ComputeClusterData, will create the
9439 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
9440 d61df03e Iustin Pop

9441 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
9442 d1c2dd75 Iustin Pop
    done.
9443 d61df03e Iustin Pop

9444 d1c2dd75 Iustin Pop
    """
9445 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
9446 d1c2dd75 Iustin Pop
9447 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
9448 27579978 Iustin Pop
      self.required_nodes = 2
9449 27579978 Iustin Pop
    else:
9450 27579978 Iustin Pop
      self.required_nodes = 1
9451 d1c2dd75 Iustin Pop
    request = {
9452 d1c2dd75 Iustin Pop
      "name": self.name,
9453 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
9454 d1c2dd75 Iustin Pop
      "tags": self.tags,
9455 d1c2dd75 Iustin Pop
      "os": self.os,
9456 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
9457 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
9458 d1c2dd75 Iustin Pop
      "disks": self.disks,
9459 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
9460 d1c2dd75 Iustin Pop
      "nics": self.nics,
9461 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
9462 d1c2dd75 Iustin Pop
      }
9463 9757cc90 Iustin Pop
    return request
9464 298fe380 Iustin Pop
9465 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
9466 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
9467 298fe380 Iustin Pop

9468 d1c2dd75 Iustin Pop
    This, in combination with _ComputeClusterData, will create the
9469 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
9470 d61df03e Iustin Pop

9471 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
9472 d1c2dd75 Iustin Pop
    done.
9473 d61df03e Iustin Pop

9474 d1c2dd75 Iustin Pop
    """
9475 923ddac0 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(self.name)
9476 27579978 Iustin Pop
    if instance is None:
9477 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
9478 27579978 Iustin Pop
                                   " IAllocator" % self.name)
9479 27579978 Iustin Pop
9480 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
9481 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
9482 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
9483 27579978 Iustin Pop
9484 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
9485 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
9486 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
9487 2a139bb0 Iustin Pop
9488 27579978 Iustin Pop
    self.required_nodes = 1
9489 dafc7302 Guido Trotter
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
9490 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
9491 27579978 Iustin Pop
9492 d1c2dd75 Iustin Pop
    request = {
9493 d1c2dd75 Iustin Pop
      "name": self.name,
9494 27579978 Iustin Pop
      "disk_space_total": disk_space,
9495 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
9496 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
9497 d1c2dd75 Iustin Pop
      }
9498 9757cc90 Iustin Pop
    return request
9499 d61df03e Iustin Pop
9500 7f60a422 Iustin Pop
  def _AddEvacuateNodes(self):
9501 7f60a422 Iustin Pop
    """Add evacuate nodes data to allocator structure.
9502 7f60a422 Iustin Pop

9503 7f60a422 Iustin Pop
    """
9504 7f60a422 Iustin Pop
    request = {
9505 7f60a422 Iustin Pop
      "evac_nodes": self.evac_nodes
9506 7f60a422 Iustin Pop
      }
9507 7f60a422 Iustin Pop
    return request
9508 7f60a422 Iustin Pop
9509 9757cc90 Iustin Pop
  def _BuildInputData(self, fn):
9510 d1c2dd75 Iustin Pop
    """Build input data structures.
9511 d61df03e Iustin Pop

9512 d1c2dd75 Iustin Pop
    """
9513 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
9514 d61df03e Iustin Pop
9515 9757cc90 Iustin Pop
    request = fn()
9516 9757cc90 Iustin Pop
    request["type"] = self.mode
9517 9757cc90 Iustin Pop
    self.in_data["request"] = request
9518 d61df03e Iustin Pop
9519 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
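    # Illustrative only: for an allocation request the serialized text is
    # roughly of the form (hypothetical values, heavily trimmed):
    #
    #   {"cluster_name": "cluster.example.com",
    #    "nodes": {...}, "instances": {...},
    #    "request": {"type": constants.IALLOCATOR_MODE_ALLOC,
    #                "name": "inst1.example.com", "required_nodes": 2, ...}}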
9520 d61df03e Iustin Pop
9521 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
9522 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
9523 298fe380 Iustin Pop

9524 d1c2dd75 Iustin Pop
    """
9525 72737a7f Iustin Pop
    if call_fn is None:
9526 923ddac0 Michael Hanselmann
      call_fn = self.rpc.call_iallocator_runner
9527 298fe380 Iustin Pop
9528 923ddac0 Michael Hanselmann
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
9529 4c4e4e1e Iustin Pop
    result.Raise("Failure while running the iallocator script")
9530 8d528b7c Iustin Pop
9531 87f5c298 Iustin Pop
    self.out_text = result.payload
9532 d1c2dd75 Iustin Pop
    if validate:
9533 d1c2dd75 Iustin Pop
      self._ValidateResult()
9534 298fe380 Iustin Pop
9535 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
9536 d1c2dd75 Iustin Pop
    """Process the allocator results.
9537 538475ca Iustin Pop

9538 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
9539 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
9540 538475ca Iustin Pop

9541 d1c2dd75 Iustin Pop
    """
9542 d1c2dd75 Iustin Pop
    try:
9543 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
9544 d1c2dd75 Iustin Pop
    except Exception, err:
9545 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
9546 d1c2dd75 Iustin Pop
9547 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
9548 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
9549 538475ca Iustin Pop
9550 680f0a89 Iustin Pop
    # TODO: remove backwards compatibility in later versions
9551 680f0a89 Iustin Pop
    if "nodes" in rdict and "result" not in rdict:
9552 680f0a89 Iustin Pop
      rdict["result"] = rdict["nodes"]
9553 680f0a89 Iustin Pop
      del rdict["nodes"]
9554 680f0a89 Iustin Pop
9555 680f0a89 Iustin Pop
    for key in "success", "info", "result":
9556 d1c2dd75 Iustin Pop
      if key not in rdict:
9557 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
9558 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
9559 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
9560 538475ca Iustin Pop
9561 680f0a89 Iustin Pop
    if not isinstance(rdict["result"], list):
9562 680f0a89 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
9563 d1c2dd75 Iustin Pop
                               " is not a list")
9564 d1c2dd75 Iustin Pop
    self.out_data = rdict
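    # Illustrative only (node names hypothetical): a well-formed reply from
    # the script therefore looks roughly like
    #
    #   {"success": true, "info": "allocation successful",
    #    "result": ["node2.example.com", "node3.example.com"]}
    #
    # older scripts that still return a "nodes" key instead of "result" are
    # accepted via the backwards-compatibility shim above.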
9565 538475ca Iustin Pop
9566 538475ca Iustin Pop
9567 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
9568 d61df03e Iustin Pop
  """Run allocator tests.
9569 d61df03e Iustin Pop

9570 d61df03e Iustin Pop
  This LU runs the allocator tests
9571 d61df03e Iustin Pop

9572 d61df03e Iustin Pop
  """
9573 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
9574 d61df03e Iustin Pop
9575 d61df03e Iustin Pop
  def CheckPrereq(self):
9576 d61df03e Iustin Pop
    """Check prerequisites.
9577 d61df03e Iustin Pop

9578 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode.
9579 d61df03e Iustin Pop

9580 d61df03e Iustin Pop
    """
9581 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
9582 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
9583 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
9584 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
9585 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
9586 5c983ee5 Iustin Pop
                                     attr, errors.ECODE_INVAL)
9587 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
9588 d61df03e Iustin Pop
      if iname is not None:
9589 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
9590 5c983ee5 Iustin Pop
                                   iname, errors.ECODE_EXISTS)
9591 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
9592 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'",
9593 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
9594 d61df03e Iustin Pop
      for row in self.op.nics:
9595 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
9596 d61df03e Iustin Pop
            "mac" not in row or
9597 d61df03e Iustin Pop
            "ip" not in row or
9598 d61df03e Iustin Pop
            "bridge" not in row):
9599 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the 'nics'"
9600 5c983ee5 Iustin Pop
                                     " parameter", errors.ECODE_INVAL)
9601 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
9602 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'",
9603 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
9604 d61df03e Iustin Pop
      for row in self.op.disks:
9605 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
9606 d61df03e Iustin Pop
            "size" not in row or
9607 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
9608 d61df03e Iustin Pop
            "mode" not in row or
9609 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
9610 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
9611 5c983ee5 Iustin Pop
                                     " parameter", errors.ECODE_INVAL)
9612 8901997e Iustin Pop
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
9613 8cc7e742 Guido Trotter
        self.op.hypervisor = self.cfg.GetHypervisorType()
9614 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
9615 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
9616 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input",
9617 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
9618 cf26a87a Iustin Pop
      fname = _ExpandInstanceName(self.cfg, self.op.name)
9619 d61df03e Iustin Pop
      self.op.name = fname
9620 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
9621 823a72bc Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
9622 823a72bc Iustin Pop
      if not hasattr(self.op, "evac_nodes"):
9623 823a72bc Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
9624 823a72bc Iustin Pop
                                   " opcode input", errors.ECODE_INVAL)
9625 d61df03e Iustin Pop
    else:
9626 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
9627 5c983ee5 Iustin Pop
                                 self.op.mode, errors.ECODE_INVAL)
9628 d61df03e Iustin Pop
9629 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
9630 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
9631 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Missing allocator name",
9632 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
9633 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
9634 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
9635 5c983ee5 Iustin Pop
                                 self.op.direction, errors.ECODE_INVAL)
9636 d61df03e Iustin Pop
9637 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
9638 d61df03e Iustin Pop
    """Run the allocator test.
9639 d61df03e Iustin Pop

9640 d61df03e Iustin Pop
    """
9641 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
9642 923ddac0 Michael Hanselmann
      ial = IAllocator(self.cfg, self.rpc,
9643 29859cb7 Iustin Pop
                       mode=self.op.mode,
9644 29859cb7 Iustin Pop
                       name=self.op.name,
9645 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
9646 29859cb7 Iustin Pop
                       disks=self.op.disks,
9647 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
9648 29859cb7 Iustin Pop
                       os=self.op.os,
9649 29859cb7 Iustin Pop
                       tags=self.op.tags,
9650 29859cb7 Iustin Pop
                       nics=self.op.nics,
9651 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
9652 8cc7e742 Guido Trotter
                       hypervisor=self.op.hypervisor,
9653 29859cb7 Iustin Pop
                       )
9654 823a72bc Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
9655 923ddac0 Michael Hanselmann
      ial = IAllocator(self.cfg, self.rpc,
9656 29859cb7 Iustin Pop
                       mode=self.op.mode,
9657 29859cb7 Iustin Pop
                       name=self.op.name,
9658 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
9659 29859cb7 Iustin Pop
                       )
9660 823a72bc Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
9661 823a72bc Iustin Pop
      ial = IAllocator(self.cfg, self.rpc,
9662 823a72bc Iustin Pop
                       mode=self.op.mode,
9663 823a72bc Iustin Pop
                       evac_nodes=self.op.evac_nodes)
9664 823a72bc Iustin Pop
    else:
9665 823a72bc Iustin Pop
      raise errors.ProgrammerError("Uncatched mode %s in"
9666 823a72bc Iustin Pop
                                   " LUTestAllocator.Exec", self.op.mode)
9667 d61df03e Iustin Pop
9668 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
9669 d1c2dd75 Iustin Pop
      result = ial.in_text
9670 298fe380 Iustin Pop
    else:
9671 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
9672 d1c2dd75 Iustin Pop
      result = ial.out_text
9673 298fe380 Iustin Pop
    return result