lib/cmdlib.py @ c33549ef


#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0201

# W0201 since most LU attributes are defined in CheckPrereq or similar
# functions

import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
    # support for dry-run
    self.dry_run_result = None
    # support for generic debug attribute
    if (not hasattr(self.op, "debug_level") or
        not isinstance(self.op.debug_level, int)):
      self.op.debug_level = 0

    # Tasklets
    self.tasklets = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name, errors.ECODE_INVAL)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left purely as a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods no longer need to worry about missing parameters.

    """
    pass

146 d465bdc8 Guido Trotter
    """Expand names for this LU.
147 d465bdc8 Guido Trotter

148 d465bdc8 Guido Trotter
    This method is called before starting to execute the opcode, and it should
149 d465bdc8 Guido Trotter
    update all the parameters of the opcode to their canonical form (e.g. a
150 d465bdc8 Guido Trotter
    short node name must be fully expanded after this method has successfully
151 d465bdc8 Guido Trotter
    completed). This way locking, hooks, logging, ecc. can work correctly.
152 d465bdc8 Guido Trotter

153 d465bdc8 Guido Trotter
    LUs which implement this method must also populate the self.needed_locks
154 d465bdc8 Guido Trotter
    member, as a dict with lock levels as keys, and a list of needed lock names
155 d465bdc8 Guido Trotter
    as values. Rules:
156 e4376078 Iustin Pop

157 e4376078 Iustin Pop
      - use an empty dict if you don't need any lock
158 e4376078 Iustin Pop
      - if you don't need any lock at a particular level omit that level
159 e4376078 Iustin Pop
      - don't put anything for the BGL level
160 e4376078 Iustin Pop
      - if you want all locks at a level use locking.ALL_SET as a value
161 d465bdc8 Guido Trotter

162 3977a4c1 Guido Trotter
    If you need to share locks (rather than acquire them exclusively) at one
163 3977a4c1 Guido Trotter
    level you can modify self.share_locks, setting a true value (usually 1) for
164 3977a4c1 Guido Trotter
    that level. By default locks are not shared.
165 3977a4c1 Guido Trotter

166 6fd35c4d Michael Hanselmann
    This function can also define a list of tasklets, which then will be
167 6fd35c4d Michael Hanselmann
    executed in order instead of the usual LU-level CheckPrereq and Exec
168 6fd35c4d Michael Hanselmann
    functions, if those are not defined by the LU.
169 6fd35c4d Michael Hanselmann

170 e4376078 Iustin Pop
    Examples::
171 e4376078 Iustin Pop

172 e4376078 Iustin Pop
      # Acquire all nodes and one instance
173 e4376078 Iustin Pop
      self.needed_locks = {
174 e4376078 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
175 e4376078 Iustin Pop
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
176 e4376078 Iustin Pop
      }
177 e4376078 Iustin Pop
      # Acquire just two nodes
178 e4376078 Iustin Pop
      self.needed_locks = {
179 e4376078 Iustin Pop
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
180 e4376078 Iustin Pop
      }
181 e4376078 Iustin Pop
      # Acquire no locks
182 e4376078 Iustin Pop
      self.needed_locks = {} # No, you can't leave it to the default value None
183 d465bdc8 Guido Trotter

184 d465bdc8 Guido Trotter
    """
185 d465bdc8 Guido Trotter
    # The implementation of this method is mandatory only if the new LU is
186 d465bdc8 Guido Trotter
    # concurrent, so that old LUs don't need to be changed all at the same
187 d465bdc8 Guido Trotter
    # time.
188 d465bdc8 Guido Trotter
    if self.REQ_BGL:
189 d465bdc8 Guido Trotter
      self.needed_locks = {} # Exclusive LUs don't need locks.
190 d465bdc8 Guido Trotter
    else:
191 d465bdc8 Guido Trotter
      raise NotImplementedError
192 d465bdc8 Guido Trotter
193 fb8dcb62 Guido Trotter
  def DeclareLocks(self, level):
194 fb8dcb62 Guido Trotter
    """Declare LU locking needs for a level
195 fb8dcb62 Guido Trotter

196 fb8dcb62 Guido Trotter
    While most LUs can just declare their locking needs at ExpandNames time,
197 fb8dcb62 Guido Trotter
    sometimes there's the need to calculate some locks after having acquired
198 fb8dcb62 Guido Trotter
    the ones before. This function is called just before acquiring locks at a
199 fb8dcb62 Guido Trotter
    particular level, but after acquiring the ones at lower levels, and permits
200 fb8dcb62 Guido Trotter
    such calculations. It can be used to modify self.needed_locks, and by
201 fb8dcb62 Guido Trotter
    default it does nothing.
202 fb8dcb62 Guido Trotter

203 fb8dcb62 Guido Trotter
    This function is only called if you have something already set in
204 fb8dcb62 Guido Trotter
    self.needed_locks for the level.
205 fb8dcb62 Guido Trotter

206 fb8dcb62 Guido Trotter
    @param level: Locking level which is going to be locked
207 fb8dcb62 Guido Trotter
    @type level: member of ganeti.locking.LEVELS
208 fb8dcb62 Guido Trotter

209 fb8dcb62 Guido Trotter
    """
210 fb8dcb62 Guido Trotter
211 a8083063 Iustin Pop
  def CheckPrereq(self):
212 a8083063 Iustin Pop
    """Check prerequisites for this LU.
213 a8083063 Iustin Pop

214 a8083063 Iustin Pop
    This method should check that the prerequisites for the execution
215 a8083063 Iustin Pop
    of this LU are fulfilled. It can do internode communication, but
216 a8083063 Iustin Pop
    it should be idempotent - no cluster or system changes are
217 a8083063 Iustin Pop
    allowed.
218 a8083063 Iustin Pop

219 a8083063 Iustin Pop
    The method should raise errors.OpPrereqError in case something is
220 a8083063 Iustin Pop
    not fulfilled. Its return value is ignored.
221 a8083063 Iustin Pop

222 a8083063 Iustin Pop
    This method should also update all the parameters of the opcode to
223 d465bdc8 Guido Trotter
    their canonical form if it hasn't been done by ExpandNames before.
224 a8083063 Iustin Pop

225 a8083063 Iustin Pop
    """
226 3a012b41 Michael Hanselmann
    if self.tasklets is not None:
227 b4a9eb66 Michael Hanselmann
      for (idx, tl) in enumerate(self.tasklets):
228 abae1b2b Michael Hanselmann
        logging.debug("Checking prerequisites for tasklet %s/%s",
229 abae1b2b Michael Hanselmann
                      idx + 1, len(self.tasklets))
230 6fd35c4d Michael Hanselmann
        tl.CheckPrereq()
231 6fd35c4d Michael Hanselmann
    else:
232 6fd35c4d Michael Hanselmann
      raise NotImplementedError
233 a8083063 Iustin Pop
234 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
235 a8083063 Iustin Pop
    """Execute the LU.
236 a8083063 Iustin Pop

237 a8083063 Iustin Pop
    This method should implement the actual work. It should raise
238 a8083063 Iustin Pop
    errors.OpExecError for failures that are somewhat dealt with in
239 a8083063 Iustin Pop
    code, or expected.
240 a8083063 Iustin Pop

241 a8083063 Iustin Pop
    """
242 3a012b41 Michael Hanselmann
    if self.tasklets is not None:
243 b4a9eb66 Michael Hanselmann
      for (idx, tl) in enumerate(self.tasklets):
244 abae1b2b Michael Hanselmann
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
245 6fd35c4d Michael Hanselmann
        tl.Exec(feedback_fn)
246 6fd35c4d Michael Hanselmann
    else:
247 6fd35c4d Michael Hanselmann
      raise NotImplementedError
248 a8083063 Iustin Pop
249 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_' as this
    will be handled in the hooks runner. Also note additional keys will
    be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    "No nodes" should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged, but any LU can override it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # API must be kept, thus we ignore the unused-argument and
    # could-be-a-function warnings
    # pylint: disable-msg=W0613,R0201
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    self.op.instance_name = _ExpandInstanceName(self.cfg,
                                                self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

315 c4a2fee1 Guido Trotter
    """Helper function to declare instances' nodes for locking.
316 c4a2fee1 Guido Trotter

317 c4a2fee1 Guido Trotter
    This function should be called after locking one or more instances to lock
318 c4a2fee1 Guido Trotter
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
319 c4a2fee1 Guido Trotter
    with all primary or secondary nodes for instances already locked and
320 c4a2fee1 Guido Trotter
    present in self.needed_locks[locking.LEVEL_INSTANCE].
321 c4a2fee1 Guido Trotter

322 c4a2fee1 Guido Trotter
    It should be called from DeclareLocks, and for safety only works if
323 c4a2fee1 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] is set.
324 c4a2fee1 Guido Trotter

325 c4a2fee1 Guido Trotter
    In the future it may grow parameters to just lock some instance's nodes, or
326 c4a2fee1 Guido Trotter
    to just lock primaries or secondary nodes, if needed.
327 c4a2fee1 Guido Trotter

328 e4376078 Iustin Pop
    If should be called in DeclareLocks in a way similar to::
329 c4a2fee1 Guido Trotter

330 e4376078 Iustin Pop
      if level == locking.LEVEL_NODE:
331 e4376078 Iustin Pop
        self._LockInstancesNodes()
332 c4a2fee1 Guido Trotter

333 a82ce292 Guido Trotter
    @type primary_only: boolean
334 a82ce292 Guido Trotter
    @param primary_only: only lock primary nodes of locked instances
335 a82ce292 Guido Trotter

336 c4a2fee1 Guido Trotter
    """
337 c4a2fee1 Guido Trotter
    assert locking.LEVEL_NODE in self.recalculate_locks, \
338 c4a2fee1 Guido Trotter
      "_LockInstancesNodes helper function called with no nodes to recalculate"
339 c4a2fee1 Guido Trotter
340 c4a2fee1 Guido Trotter
    # TODO: check if we're really been called with the instance locks held
341 c4a2fee1 Guido Trotter
342 c4a2fee1 Guido Trotter
    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
343 c4a2fee1 Guido Trotter
    # future we might want to have different behaviors depending on the value
344 c4a2fee1 Guido Trotter
    # of self.recalculate_locks[locking.LEVEL_NODE]
345 c4a2fee1 Guido Trotter
    wanted_nodes = []
346 6683bba2 Guido Trotter
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
347 c4a2fee1 Guido Trotter
      instance = self.context.cfg.GetInstanceInfo(instance_name)
348 c4a2fee1 Guido Trotter
      wanted_nodes.append(instance.primary_node)
349 a82ce292 Guido Trotter
      if not primary_only:
350 a82ce292 Guido Trotter
        wanted_nodes.extend(instance.secondary_nodes)
351 9513b6ab Guido Trotter
352 9513b6ab Guido Trotter
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
353 9513b6ab Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
354 9513b6ab Guido Trotter
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
355 9513b6ab Guido Trotter
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)
356 c4a2fee1 Guido Trotter
357 c4a2fee1 Guido Trotter
    del self.recalculate_locks[locking.LEVEL_NODE]
358 c4a2fee1 Guido Trotter
359 a8083063 Iustin Pop
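# Illustrative sketch (not part of the original module): a minimal
# LogicalUnit subclass following the rules from the class docstring above.
# The class name _ExampleNoopLU is hypothetical and only meant to show how
# ExpandNames/CheckPrereq/Exec fit together; the real LUs follow below.
class _ExampleNoopLU(LogicalUnit):
  """Example no-op LU: acquires no locks and does nothing."""
  HPATH = None             # no hooks are run for this LU
  HTYPE = None
  _OP_REQP = []            # no required opcode parameters
  REQ_BGL = False          # the Big Ganeti Lock is not needed exclusively

  def ExpandNames(self):
    # An empty dict means "no locks"; leaving the default None is not allowed
    self.needed_locks = {}

  def CheckPrereq(self):
    # Nothing to verify for this example
    pass

  def Exec(self, feedback_fn):
    feedback_fn("Example no-op LU executed")

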
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Empty BuildHooksEnv for NoHooksLU.

    This just raises an error.

    """
    assert False, "BuildHooksEnv called for NoHooksLUs"

class Tasklet:
  """Tasklet base class.

  Tasklets are subcomponents for LUs. LUs can consist entirely of tasklets or
  they can mix legacy code with tasklets. Locking needs to be done in the LU,
  tasklets know nothing about locks.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    self.lu = lu

    # Shortcuts
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check prerequisites for this tasklet.

    This method should check whether the prerequisites for the execution of
    this tasklet are fulfilled. It can do internode communication, but it
    should be idempotent - no cluster or system changes are allowed.

    The method should raise errors.OpPrereqError in case something is not
    fulfilled. Its return value is ignored.

    This method should also update all parameters to their canonical form if it
    hasn't been done before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in code, or
    expected.

    """
    raise NotImplementedError


def _GetWantedNodes(lu, nodes):
426 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded node names.
427 83120a01 Michael Hanselmann

428 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
429 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
430 e4376078 Iustin Pop
  @type nodes: list
431 e4376078 Iustin Pop
  @param nodes: list of node names or None for all nodes
432 e4376078 Iustin Pop
  @rtype: list
433 e4376078 Iustin Pop
  @return: the list of nodes, sorted
434 083a91c9 Iustin Pop
  @raise errors.ProgrammerError: if the nodes parameter is wrong type
435 83120a01 Michael Hanselmann

436 83120a01 Michael Hanselmann
  """
437 3312b702 Iustin Pop
  if not isinstance(nodes, list):
438 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
439 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
440 dcb93971 Michael Hanselmann
441 ea47808a Guido Trotter
  if not nodes:
442 ea47808a Guido Trotter
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
443 ea47808a Guido Trotter
      " non-empty list of nodes whose name is to be expanded.")
444 dcb93971 Michael Hanselmann
445 61dabca4 Iustin Pop
  wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
446 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
447 3312b702 Iustin Pop
448 3312b702 Iustin Pop
449 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'",
                               errors.ECODE_INVAL)

  if instances:
    wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta), errors.ECODE_INVAL)


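# Illustrative usage note (not part of the original module): if the static
# set matches only "name" and the dynamic set only "load", then a request
# with selected=["name", "load"] passes, while selected=["name", "cpu"]
# makes f.NonMatching() return ["cpu"] and _CheckOutputFields raises
# OpPrereqError("Unknown output fields selected: cpu").

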
def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)), errors.ECODE_INVAL)
  setattr(op, name, val)


def _CheckGlobalHvParams(params):
  """Validates that given hypervisor params are not global ones.

  This will ensure that instances don't get customised versions of
  global params.

  """
  used_globals = constants.HVC_GLOBALS.intersection(params)
  if used_globals:
    msg = ("The following hypervisor parameters are global and cannot"
           " be customized at instance level, please modify them at"
           " cluster level: %s" % utils.CommaJoin(used_globals))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_INVAL)


def _CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node does not support the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)


def _RequireFileStorage():
  """Checks that file storage is enabled.

  @raise errors.OpPrereqError: when file storage is disabled

  """
  if not constants.ENABLE_FILE_STORAGE:
    raise errors.OpPrereqError("File storage disabled at configure time",
                               errors.ECODE_INVAL)


def _CheckDiskTemplate(template):
  """Ensure a given disk template is valid.

  """
  if template not in constants.DISK_TEMPLATES:
    msg = ("Invalid disk template name '%s', valid templates are: %s" %
           (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
  if template == constants.DT_FILE:
    _RequireFileStorage()


def _CheckStorageType(storage_type):
  """Ensure a given storage type is valid.

  """
  if storage_type not in constants.VALID_STORAGE_TYPES:
    raise errors.OpPrereqError("Unknown storage type: %s" % storage_type,
                               errors.ECODE_INVAL)
  if storage_type == constants.ST_FILE:
    _RequireFileStorage()


def _CheckInstanceDown(lu, instance, reason):
  """Ensure that an instance is not running."""
  if instance.admin_up:
    raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
                               (instance.name, reason), errors.ECODE_STATE)

  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  ins_l.Raise("Can't contact node %s for instance information" % pnode,
              prereq=True, ecode=errors.ECODE_ENVIRON)

  if instance.name in ins_l.payload:
    raise errors.OpPrereqError("Instance %s is running, %s" %
                               (instance.name, reason), errors.ECODE_STATE)


def _ExpandItemName(fn, name, kind):
  """Expand an item name.

  @param fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the resolved (full) name
  @raise errors.OpPrereqError: if the item is not found

  """
  full_name = fn(name)
  if full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return full_name


def _ExpandNodeName(cfg, name):
  """Wrapper over L{_ExpandItemName} for nodes."""
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")


def _ExpandInstanceName(cfg, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
644 67fc3042 Iustin Pop
                          memory, vcpus, nics, disk_template, disks,
645 7c4d6c7b Michael Hanselmann
                          bep, hvp, hypervisor_name):
646 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
647 e4376078 Iustin Pop

648 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
649 e4376078 Iustin Pop

650 e4376078 Iustin Pop
  @type name: string
651 e4376078 Iustin Pop
  @param name: the name of the instance
652 e4376078 Iustin Pop
  @type primary_node: string
653 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
654 e4376078 Iustin Pop
  @type secondary_nodes: list
655 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
656 e4376078 Iustin Pop
  @type os_type: string
657 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
658 0d68c45d Iustin Pop
  @type status: boolean
659 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
660 e4376078 Iustin Pop
  @type memory: string
661 e4376078 Iustin Pop
  @param memory: the memory size of the instance
662 e4376078 Iustin Pop
  @type vcpus: string
663 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
664 e4376078 Iustin Pop
  @type nics: list
665 5e3d3eb3 Guido Trotter
  @param nics: list of tuples (ip, mac, mode, link) representing
666 5e3d3eb3 Guido Trotter
      the NICs the instance has
667 2c2690c9 Iustin Pop
  @type disk_template: string
668 5bbd3f7f Michael Hanselmann
  @param disk_template: the disk template of the instance
669 2c2690c9 Iustin Pop
  @type disks: list
670 2c2690c9 Iustin Pop
  @param disks: the list of (size, mode) pairs
671 67fc3042 Iustin Pop
  @type bep: dict
672 67fc3042 Iustin Pop
  @param bep: the backend parameters for the instance
673 67fc3042 Iustin Pop
  @type hvp: dict
674 67fc3042 Iustin Pop
  @param hvp: the hypervisor parameters for the instance
675 7c4d6c7b Michael Hanselmann
  @type hypervisor_name: string
676 7c4d6c7b Michael Hanselmann
  @param hypervisor_name: the hypervisor for the instance
677 e4376078 Iustin Pop
  @rtype: dict
678 e4376078 Iustin Pop
  @return: the hook environment for this instance
679 ecb215b5 Michael Hanselmann

680 396e1b78 Michael Hanselmann
  """
681 0d68c45d Iustin Pop
  if status:
682 0d68c45d Iustin Pop
    str_status = "up"
683 0d68c45d Iustin Pop
  else:
684 0d68c45d Iustin Pop
    str_status = "down"
685 396e1b78 Michael Hanselmann
  env = {
686 0e137c28 Iustin Pop
    "OP_TARGET": name,
687 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
688 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
689 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
690 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
691 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
692 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
693 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
694 2c2690c9 Iustin Pop
    "INSTANCE_DISK_TEMPLATE": disk_template,
695 7c4d6c7b Michael Hanselmann
    "INSTANCE_HYPERVISOR": hypervisor_name,
696 396e1b78 Michael Hanselmann
  }
697 396e1b78 Michael Hanselmann
698 396e1b78 Michael Hanselmann
  if nics:
699 396e1b78 Michael Hanselmann
    nic_count = len(nics)
700 62f0dd02 Guido Trotter
    for idx, (ip, mac, mode, link) in enumerate(nics):
701 396e1b78 Michael Hanselmann
      if ip is None:
702 396e1b78 Michael Hanselmann
        ip = ""
703 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
704 2c2690c9 Iustin Pop
      env["INSTANCE_NIC%d_MAC" % idx] = mac
705 62f0dd02 Guido Trotter
      env["INSTANCE_NIC%d_MODE" % idx] = mode
706 62f0dd02 Guido Trotter
      env["INSTANCE_NIC%d_LINK" % idx] = link
707 62f0dd02 Guido Trotter
      if mode == constants.NIC_MODE_BRIDGED:
708 62f0dd02 Guido Trotter
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
709 396e1b78 Michael Hanselmann
  else:
710 396e1b78 Michael Hanselmann
    nic_count = 0
711 396e1b78 Michael Hanselmann
712 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
713 396e1b78 Michael Hanselmann
714 2c2690c9 Iustin Pop
  if disks:
715 2c2690c9 Iustin Pop
    disk_count = len(disks)
716 2c2690c9 Iustin Pop
    for idx, (size, mode) in enumerate(disks):
717 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_SIZE" % idx] = size
718 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_MODE" % idx] = mode
719 2c2690c9 Iustin Pop
  else:
720 2c2690c9 Iustin Pop
    disk_count = 0
721 2c2690c9 Iustin Pop
722 2c2690c9 Iustin Pop
  env["INSTANCE_DISK_COUNT"] = disk_count
723 2c2690c9 Iustin Pop
724 67fc3042 Iustin Pop
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
725 67fc3042 Iustin Pop
    for key, value in source.items():
726 67fc3042 Iustin Pop
      env["INSTANCE_%s_%s" % (kind, key)] = value
727 67fc3042 Iustin Pop
728 396e1b78 Michael Hanselmann
  return env
729 396e1b78 Michael Hanselmann
730 96acbc09 Michael Hanselmann
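# Illustrative usage note (not part of the original module): for an instance
# with one bridged NIC and one disk, the dict built above contains keys such
# as OP_TARGET, INSTANCE_NAME, INSTANCE_PRIMARY, INSTANCE_STATUS,
# INSTANCE_NIC_COUNT=1, INSTANCE_NIC0_IP/_MAC/_MODE/_LINK/_BRIDGE,
# INSTANCE_DISK_COUNT=1, INSTANCE_DISK0_SIZE/_MODE, plus one
# INSTANCE_BE_*/INSTANCE_HV_* entry per backend/hypervisor parameter; the
# hooks runner later prefixes the keys with GANETI_.

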
def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


def _AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _DecideSelfPromotion(lu, exceptions=None):
  """Decide whether I should promote myself as a master candidate.

  """
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  # the new node will increase mc_max by one, so:
  mc_should = min(mc_should + 1, cp_size)
  return mc_now < mc_should


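# Illustrative worked example (not part of the original module): with a
# candidate_pool_size of 10 and GetMasterCandidateStats() reporting
# mc_now=3 current candidates and mc_should=3, adding this node gives
# mc_should = min(3 + 1, 10) = 4, and since 3 < 4 the function returns True,
# i.e. the new node should promote itself to master candidate.

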
819 6d7e1f20 Guido Trotter
820 b165e77e Guido Trotter
def _CheckNicsBridgesExist(lu, target_nics, target_node,
821 b165e77e Guido Trotter
                               profile=constants.PP_DEFAULT):
822 b165e77e Guido Trotter
  """Check that the brigdes needed by a list of nics exist.
823 b165e77e Guido Trotter

824 b165e77e Guido Trotter
  """
825 b165e77e Guido Trotter
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
826 b165e77e Guido Trotter
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
827 b165e77e Guido Trotter
                for nic in target_nics]
828 b165e77e Guido Trotter
  brlist = [params[constants.NIC_LINK] for params in paramslist
829 b165e77e Guido Trotter
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
830 b165e77e Guido Trotter
  if brlist:
831 b165e77e Guido Trotter
    result = lu.rpc.call_bridges_exist(target_node, brlist)
832 4c4e4e1e Iustin Pop
    result.Raise("Error checking bridges on destination node '%s'" %
833 045dd6d9 Iustin Pop
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
834 b165e77e Guido Trotter
835 b165e77e Guido Trotter
836 b165e77e Guido Trotter
def _CheckInstanceBridgesExist(lu, instance, node=None):
837 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
838 bf6929a2 Alexander Schreiber

839 bf6929a2 Alexander Schreiber
  """
840 b165e77e Guido Trotter
  if node is None:
841 29921401 Iustin Pop
    node = instance.primary_node
842 b165e77e Guido Trotter
  _CheckNicsBridgesExist(lu, instance.nics, node)
843 bf6929a2 Alexander Schreiber
844 bf6929a2 Alexander Schreiber
845 c6f1af07 Iustin Pop
def _CheckOSVariant(os_obj, name):
846 f2c05717 Guido Trotter
  """Check whether an OS name conforms to the os variants specification.
847 f2c05717 Guido Trotter

848 c6f1af07 Iustin Pop
  @type os_obj: L{objects.OS}
849 c6f1af07 Iustin Pop
  @param os_obj: OS object to check
850 f2c05717 Guido Trotter
  @type name: string
851 f2c05717 Guido Trotter
  @param name: OS name passed by the user, to check for validity
852 f2c05717 Guido Trotter

853 f2c05717 Guido Trotter
  """
854 c6f1af07 Iustin Pop
  if not os_obj.supported_variants:
855 f2c05717 Guido Trotter
    return
856 f2c05717 Guido Trotter
  try:
857 f2c05717 Guido Trotter
    variant = name.split("+", 1)[1]
858 f2c05717 Guido Trotter
  except IndexError:
859 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("OS name must include a variant",
860 5c983ee5 Iustin Pop
                               errors.ECODE_INVAL)
861 f2c05717 Guido Trotter
862 c6f1af07 Iustin Pop
  if variant not in os_obj.supported_variants:
863 5c983ee5 Iustin Pop
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
864 f2c05717 Guido Trotter
865 f2c05717 Guido Trotter
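# Illustrative usage note (not part of the original module): for an OS that
# declares supported_variants=["lenny", "squeeze"] (values are hypothetical),
# a user-supplied name like "debian+squeeze" passes (the variant is whatever
# follows the first "+"), "debian+etch" fails with "Unsupported OS variant",
# and plain "debian" fails with "OS name must include a variant".

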
def _GetNodeInstancesInner(cfg, fn):
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]


def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """

  return _GetNodeInstancesInner(cfg, lambda inst: node_name in inst.all_nodes)


def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name == inst.primary_node)


def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.secondary_nodes)


def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  # Special case for file storage
  if storage_type == constants.ST_FILE:
    # storage.FileStorage wants a list of storage directories
    return [[cfg.GetFileStorageDir()]]

  return []


def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
  faulty = []

  for dev in instance.disks:
    cfg.SetDiskID(dev, node_name)

  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
  result.Raise("Failed to get disk status from node %s" % node_name,
               prereq=prereq, ecode=errors.ECODE_ENVIRON)

  for idx, bdev_status in enumerate(result.payload):
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
      faulty.append(idx)

  return faulty


def _FormatTimestamp(secs):
  """Formats a Unix timestamp with the local timezone.

  """
  return time.strftime("%F %T %Z", time.localtime(secs))


class LUPostInitCluster(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    mn = self.cfg.GetMasterNode()
    return env, [], [mn]

  def CheckPrereq(self):
    """No prerequisites to check.

    """
    return True

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True


class LUDestroyCluster(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except:
      # pylint: disable-msg=W0702
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    if modify_ssh_setup:
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      utils.CreateBackup(priv_key)
      utils.CreateBackup(pub_key)

    return master


def _VerifyCertificateInner(filename, expired, not_before, not_after, now,
                            warn_days=constants.SSL_CERT_EXPIRATION_WARN,
                            error_days=constants.SSL_CERT_EXPIRATION_ERROR):
  """Verifies certificate details for LUVerifyCluster.

  """
  if expired:
    msg = "Certificate %s is expired" % filename
    if not_before is not None and not_after is not None:
1031 b98bf262 Michael Hanselmann
      msg += (" (valid from %s to %s)" %
1032 b98bf262 Michael Hanselmann
              (_FormatTimestamp(not_before),
1033 b98bf262 Michael Hanselmann
               _FormatTimestamp(not_after)))
1034 b98bf262 Michael Hanselmann
    elif not_before is not None:
1035 b98bf262 Michael Hanselmann
      msg += " (valid from %s)" % _FormatTimestamp(not_before)
1036 b98bf262 Michael Hanselmann
    elif not_after is not None:
1037 b98bf262 Michael Hanselmann
      msg += " (valid until %s)" % _FormatTimestamp(not_after)
1038 b98bf262 Michael Hanselmann
1039 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_ERROR, msg)
1040 b98bf262 Michael Hanselmann
1041 b98bf262 Michael Hanselmann
  elif not_before is not None and not_before > now:
1042 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_WARNING,
1043 b98bf262 Michael Hanselmann
            "Certificate %s not yet valid (valid from %s)" %
1044 b98bf262 Michael Hanselmann
            (filename, _FormatTimestamp(not_before)))
1045 b98bf262 Michael Hanselmann
1046 b98bf262 Michael Hanselmann
  elif not_after is not None:
1047 b98bf262 Michael Hanselmann
    remaining_days = int((not_after - now) / (24 * 3600))
1048 b98bf262 Michael Hanselmann
1049 b98bf262 Michael Hanselmann
    msg = ("Certificate %s expires in %d days" % (filename, remaining_days))
1050 b98bf262 Michael Hanselmann
1051 b98bf262 Michael Hanselmann
    if remaining_days <= error_days:
1052 b98bf262 Michael Hanselmann
      return (LUVerifyCluster.ETYPE_ERROR, msg)
1053 b98bf262 Michael Hanselmann
1054 b98bf262 Michael Hanselmann
    if remaining_days <= warn_days:
1055 b98bf262 Michael Hanselmann
      return (LUVerifyCluster.ETYPE_WARNING, msg)
1056 b98bf262 Michael Hanselmann
1057 b98bf262 Michael Hanselmann
  return (None, None)
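
# Illustrative example (made-up path and timestamps; the 30/7 day thresholds
# are passed explicitly instead of relying on the constants): how the helper
# above classifies a certificate.
#
#   now = 1300000000.0
#   # expires in ~14 days -> warning
#   _VerifyCertificateInner("/tmp/test.pem", False, None,
#                           now + 14 * 24 * 3600, now,
#                           warn_days=30, error_days=7)
#   --> (LUVerifyCluster.ETYPE_WARNING,
#        "Certificate /tmp/test.pem expires in 14 days")
#   # already expired -> error
#   _VerifyCertificateInner("/tmp/test.pem", True, None, None, now)
#   --> (LUVerifyCluster.ETYPE_ERROR, "Certificate /tmp/test.pem is expired")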
1058 b98bf262 Michael Hanselmann
1059 b98bf262 Michael Hanselmann
1060 b98bf262 Michael Hanselmann
def _VerifyCertificate(filename):
1061 b98bf262 Michael Hanselmann
  """Verifies a certificate for LUVerifyCluster.
1062 b98bf262 Michael Hanselmann

1063 b98bf262 Michael Hanselmann
  @type filename: string
1064 b98bf262 Michael Hanselmann
  @param filename: Path to PEM file
1065 b98bf262 Michael Hanselmann

1066 b98bf262 Michael Hanselmann
  """
1067 b98bf262 Michael Hanselmann
  try:
1068 b98bf262 Michael Hanselmann
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
1069 b98bf262 Michael Hanselmann
                                           utils.ReadFile(filename))
1070 b98bf262 Michael Hanselmann
  except Exception, err: # pylint: disable-msg=W0703
1071 b98bf262 Michael Hanselmann
    return (LUVerifyCluster.ETYPE_ERROR,
1072 b98bf262 Michael Hanselmann
            "Failed to load X509 certificate %s: %s" % (filename, err))
1073 b98bf262 Michael Hanselmann
1074 b98bf262 Michael Hanselmann
  # Depending on the pyOpenSSL version, this can just return (None, None)
1075 b98bf262 Michael Hanselmann
  (not_before, not_after) = utils.GetX509CertValidity(cert)
1076 b98bf262 Michael Hanselmann
1077 b98bf262 Michael Hanselmann
  return _VerifyCertificateInner(filename, cert.has_expired(),
1078 b98bf262 Michael Hanselmann
                                 not_before, not_after, time.time())
1079 b98bf262 Michael Hanselmann
1080 b98bf262 Michael Hanselmann
1081 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
1082 a8083063 Iustin Pop
  """Verifies the cluster status.
1083 a8083063 Iustin Pop

1084 a8083063 Iustin Pop
  """
1085 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
1086 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
1087 a0c9776a Iustin Pop
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
1088 d4b9d97f Guido Trotter
  REQ_BGL = False
1089 d4b9d97f Guido Trotter
1090 7c874ee1 Iustin Pop
  TCLUSTER = "cluster"
1091 7c874ee1 Iustin Pop
  TNODE = "node"
1092 7c874ee1 Iustin Pop
  TINSTANCE = "instance"
1093 7c874ee1 Iustin Pop
1094 7c874ee1 Iustin Pop
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
1095 b98bf262 Michael Hanselmann
  ECLUSTERCERT = (TCLUSTER, "ECLUSTERCERT")
1096 7c874ee1 Iustin Pop
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
1097 7c874ee1 Iustin Pop
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
1098 7c874ee1 Iustin Pop
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
1099 7c874ee1 Iustin Pop
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
1101 7c874ee1 Iustin Pop
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
1102 7c874ee1 Iustin Pop
  ENODEDRBD = (TNODE, "ENODEDRBD")
1103 7c874ee1 Iustin Pop
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
1104 7c874ee1 Iustin Pop
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
1105 7c874ee1 Iustin Pop
  ENODEHV = (TNODE, "ENODEHV")
1106 7c874ee1 Iustin Pop
  ENODELVM = (TNODE, "ENODELVM")
1107 7c874ee1 Iustin Pop
  ENODEN1 = (TNODE, "ENODEN1")
1108 7c874ee1 Iustin Pop
  ENODENET = (TNODE, "ENODENET")
1109 7c874ee1 Iustin Pop
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
1110 7c874ee1 Iustin Pop
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
1111 7c874ee1 Iustin Pop
  ENODERPC = (TNODE, "ENODERPC")
1112 7c874ee1 Iustin Pop
  ENODESSH = (TNODE, "ENODESSH")
1113 7c874ee1 Iustin Pop
  ENODEVERSION = (TNODE, "ENODEVERSION")
1114 7c0aa8e9 Iustin Pop
  ENODESETUP = (TNODE, "ENODESETUP")
1115 313b2dd4 Michael Hanselmann
  ENODETIME = (TNODE, "ENODETIME")
1116 7c874ee1 Iustin Pop
1117 a0c9776a Iustin Pop
  ETYPE_FIELD = "code"
1118 a0c9776a Iustin Pop
  ETYPE_ERROR = "ERROR"
1119 a0c9776a Iustin Pop
  ETYPE_WARNING = "WARNING"
1120 a0c9776a Iustin Pop
1121 02c521e4 Iustin Pop
  class NodeImage(object):
1122 02c521e4 Iustin Pop
    """A class representing the logical and physical status of a node.
1123 02c521e4 Iustin Pop

1124 02c521e4 Iustin Pop
    @ivar volumes: a structure as returned from
1125 3a488770 Iustin Pop
        L{ganeti.backend.GetVolumeList} (runtime)
1126 02c521e4 Iustin Pop
    @ivar instances: a list of running instances (runtime)
1127 02c521e4 Iustin Pop
    @ivar pinst: list of configured primary instances (config)
1128 02c521e4 Iustin Pop
    @ivar sinst: list of configured secondary instances (config)
1129 02c521e4 Iustin Pop
    @ivar sbp: dictionary of {primary-node: list of instances} of all peers
1130 02c521e4 Iustin Pop
        of this node (config)
1131 02c521e4 Iustin Pop
    @ivar mfree: free memory, as reported by hypervisor (runtime)
1132 02c521e4 Iustin Pop
    @ivar dfree: free disk, as reported by the node (runtime)
1133 02c521e4 Iustin Pop
    @ivar offline: the offline status (config)
1134 02c521e4 Iustin Pop
    @type rpc_fail: boolean
1135 02c521e4 Iustin Pop
    @ivar rpc_fail: whether the RPC verify call failed (overall,
1136 02c521e4 Iustin Pop
        not whether the individual keys were correct) (runtime)
1137 02c521e4 Iustin Pop
    @type lvm_fail: boolean
1138 02c521e4 Iustin Pop
    @ivar lvm_fail: whether the RPC call didn't return valid LVM data
1139 02c521e4 Iustin Pop
    @type hyp_fail: boolean
1140 02c521e4 Iustin Pop
    @ivar hyp_fail: whether the RPC call didn't return the instance list
1141 02c521e4 Iustin Pop
    @type ghost: boolean
1142 02c521e4 Iustin Pop
    @ivar ghost: whether this is a known node or not (config)
1143 02c521e4 Iustin Pop

1144 02c521e4 Iustin Pop
    """
1145 02c521e4 Iustin Pop
    def __init__(self, offline=False):
1146 02c521e4 Iustin Pop
      self.volumes = {}
1147 02c521e4 Iustin Pop
      self.instances = []
1148 02c521e4 Iustin Pop
      self.pinst = []
1149 02c521e4 Iustin Pop
      self.sinst = []
1150 02c521e4 Iustin Pop
      self.sbp = {}
1151 02c521e4 Iustin Pop
      self.mfree = 0
1152 02c521e4 Iustin Pop
      self.dfree = 0
1153 02c521e4 Iustin Pop
      self.offline = offline
1154 02c521e4 Iustin Pop
      self.rpc_fail = False
1155 02c521e4 Iustin Pop
      self.lvm_fail = False
1156 02c521e4 Iustin Pop
      self.hyp_fail = False
1157 02c521e4 Iustin Pop
      self.ghost = False
1158 02c521e4 Iustin Pop
1159 d4b9d97f Guido Trotter
  def ExpandNames(self):
1160 d4b9d97f Guido Trotter
    self.needed_locks = {
1161 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1162 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1163 d4b9d97f Guido Trotter
    }
1164 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1165 a8083063 Iustin Pop
1166 7c874ee1 Iustin Pop
  def _Error(self, ecode, item, msg, *args, **kwargs):
1167 7c874ee1 Iustin Pop
    """Format an error message.
1168 7c874ee1 Iustin Pop

1169 7c874ee1 Iustin Pop
    Based on the opcode's error_codes parameter, either format a
1170 7c874ee1 Iustin Pop
    parseable error code, or a simpler error string.
1171 7c874ee1 Iustin Pop

1172 7c874ee1 Iustin Pop
    This must be called only from Exec and functions called from Exec.
1173 7c874ee1 Iustin Pop

1174 7c874ee1 Iustin Pop
    """
1175 a0c9776a Iustin Pop
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
1176 7c874ee1 Iustin Pop
    itype, etxt = ecode
1177 7c874ee1 Iustin Pop
    # first complete the msg
1178 7c874ee1 Iustin Pop
    if args:
1179 7c874ee1 Iustin Pop
      msg = msg % args
1180 7c874ee1 Iustin Pop
    # then format the whole message
1181 7c874ee1 Iustin Pop
    if self.op.error_codes:
1182 7c874ee1 Iustin Pop
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
1183 7c874ee1 Iustin Pop
    else:
1184 7c874ee1 Iustin Pop
      if item:
1185 7c874ee1 Iustin Pop
        item = " " + item
1186 7c874ee1 Iustin Pop
      else:
1187 7c874ee1 Iustin Pop
        item = ""
1188 7c874ee1 Iustin Pop
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
1189 7c874ee1 Iustin Pop
    # and finally report it via the feedback_fn
1190 7c874ee1 Iustin Pop
    self._feedback_fn("  - %s" % msg)
1191 7c874ee1 Iustin Pop
1192 a0c9776a Iustin Pop
  def _ErrorIf(self, cond, *args, **kwargs):
1193 a0c9776a Iustin Pop
    """Log an error message if the passed condition is True.
1194 a0c9776a Iustin Pop

1195 a0c9776a Iustin Pop
    """
1196 a0c9776a Iustin Pop
    cond = bool(cond) or self.op.debug_simulate_errors
1197 a0c9776a Iustin Pop
    if cond:
1198 a0c9776a Iustin Pop
      self._Error(*args, **kwargs)
1199 a0c9776a Iustin Pop
    # do not mark the operation as failed for WARN cases only
1200 a0c9776a Iustin Pop
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
1201 a0c9776a Iustin Pop
      self.bad = self.bad or cond
1202 a0c9776a Iustin Pop
1203 02c521e4 Iustin Pop
  def _VerifyNode(self, ninfo, nresult):
1204 a8083063 Iustin Pop
    """Run multiple tests against a node.
1205 a8083063 Iustin Pop

1206 112f18a5 Iustin Pop
    Test list:
1207 e4376078 Iustin Pop

1208 a8083063 Iustin Pop
      - compares ganeti version
1209 5bbd3f7f Michael Hanselmann
      - checks vg existence and size > 20G
1210 a8083063 Iustin Pop
      - checks config file checksum
1211 a8083063 Iustin Pop
      - checks ssh to other nodes
1212 a8083063 Iustin Pop

1213 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1214 02c521e4 Iustin Pop
    @param ninfo: the node to check
1215 02c521e4 Iustin Pop
    @param nresult: the results from the node
1216 02c521e4 Iustin Pop
    @rtype: boolean
1217 02c521e4 Iustin Pop
    @return: whether overall this call was successful (and we can expect
1218 02c521e4 Iustin Pop
         reasonable values in the response)
1219 098c0958 Michael Hanselmann

1220 a8083063 Iustin Pop
    """
1221 02c521e4 Iustin Pop
    node = ninfo.name
1222 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1223 25361b9a Iustin Pop
1224 02c521e4 Iustin Pop
    # main result, nresult should be a non-empty dict
1225 02c521e4 Iustin Pop
    test = not nresult or not isinstance(nresult, dict)
1226 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1227 7c874ee1 Iustin Pop
                  "unable to verify node: no data returned")
1228 a0c9776a Iustin Pop
    if test:
1229 02c521e4 Iustin Pop
      return False
1230 25361b9a Iustin Pop
1231 a8083063 Iustin Pop
    # compares ganeti version
1232 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
1233 02c521e4 Iustin Pop
    remote_version = nresult.get("version", None)
1234 a0c9776a Iustin Pop
    test = not (remote_version and
1235 a0c9776a Iustin Pop
                isinstance(remote_version, (list, tuple)) and
1236 a0c9776a Iustin Pop
                len(remote_version) == 2)
1237 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1238 a0c9776a Iustin Pop
             "connection to node returned invalid data")
1239 a0c9776a Iustin Pop
    if test:
1240 02c521e4 Iustin Pop
      return False
1241 a0c9776a Iustin Pop
1242 a0c9776a Iustin Pop
    test = local_version != remote_version[0]
1243 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEVERSION, node,
1244 a0c9776a Iustin Pop
             "incompatible protocol versions: master %s,"
1245 a0c9776a Iustin Pop
             " node %s", local_version, remote_version[0])
1246 a0c9776a Iustin Pop
    if test:
1247 02c521e4 Iustin Pop
      return False
1248 a8083063 Iustin Pop
1249 e9ce0a64 Iustin Pop
    # node seems compatible, we can actually try to look into its results
1250 a8083063 Iustin Pop
1251 e9ce0a64 Iustin Pop
    # full package version
1252 a0c9776a Iustin Pop
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1253 a0c9776a Iustin Pop
                  self.ENODEVERSION, node,
1254 7c874ee1 Iustin Pop
                  "software version mismatch: master %s, node %s",
1255 7c874ee1 Iustin Pop
                  constants.RELEASE_VERSION, remote_version[1],
1256 a0c9776a Iustin Pop
                  code=self.ETYPE_WARNING)
1257 e9ce0a64 Iustin Pop
1258 02c521e4 Iustin Pop
    hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
1259 02c521e4 Iustin Pop
    if isinstance(hyp_result, dict):
1260 02c521e4 Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
1261 02c521e4 Iustin Pop
        test = hv_result is not None
1262 02c521e4 Iustin Pop
        _ErrorIf(test, self.ENODEHV, node,
1263 02c521e4 Iustin Pop
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1264 a8083063 Iustin Pop
1265 a8083063 Iustin Pop
1266 02c521e4 Iustin Pop
    test = nresult.get(constants.NV_NODESETUP,
1267 02c521e4 Iustin Pop
                           ["Missing NODESETUP results"])
1268 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1269 02c521e4 Iustin Pop
             "; ".join(test))
1270 02c521e4 Iustin Pop
1271 02c521e4 Iustin Pop
    return True
1272 02c521e4 Iustin Pop
1273 02c521e4 Iustin Pop
  def _VerifyNodeTime(self, ninfo, nresult,
1274 02c521e4 Iustin Pop
                      nvinfo_starttime, nvinfo_endtime):
1275 02c521e4 Iustin Pop
    """Check the node time.
1276 02c521e4 Iustin Pop

1277 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1278 02c521e4 Iustin Pop
    @param ninfo: the node to check
1279 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1280 02c521e4 Iustin Pop
    @param nvinfo_starttime: the start time of the RPC call
1281 02c521e4 Iustin Pop
    @param nvinfo_endtime: the end time of the RPC call
1282 02c521e4 Iustin Pop

1283 02c521e4 Iustin Pop
    """
1284 02c521e4 Iustin Pop
    node = ninfo.name
1285 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1286 02c521e4 Iustin Pop
1287 02c521e4 Iustin Pop
    ntime = nresult.get(constants.NV_TIME, None)
1288 02c521e4 Iustin Pop
    try:
1289 02c521e4 Iustin Pop
      ntime_merged = utils.MergeTime(ntime)
1290 02c521e4 Iustin Pop
    except (ValueError, TypeError):
1291 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODETIME, node, "Node returned invalid time")
1292 02c521e4 Iustin Pop
      return
1293 02c521e4 Iustin Pop
1294 02c521e4 Iustin Pop
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
1295 02c521e4 Iustin Pop
      ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
1296 02c521e4 Iustin Pop
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
1297 02c521e4 Iustin Pop
      ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
1298 02c521e4 Iustin Pop
    else:
1299 02c521e4 Iustin Pop
      ntime_diff = None
1300 02c521e4 Iustin Pop
1301 02c521e4 Iustin Pop
    _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
1302 02c521e4 Iustin Pop
             "Node time diverges by at least %s from master node time",
1303 02c521e4 Iustin Pop
             ntime_diff)
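
  # Illustrative example (made-up figures, assuming the default
  # constants.NODE_MAX_CLOCK_SKEW of 150 seconds): if the verify RPC ran
  # between nvinfo_starttime=1300000400.0 and nvinfo_endtime=1300000410.0
  # but the node reported a merged time of 1300000000.0, then
  #   ntime_diff = "%.01fs" % abs(1300000400.0 - 1300000000.0)   # "400.0s"
  # and the node is flagged with ENODETIME; a node whose time falls inside
  # [start - skew, end + skew] yields ntime_diff = None and passes.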
1304 02c521e4 Iustin Pop
1305 02c521e4 Iustin Pop
  def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
1306 02c521e4 Iustin Pop
    """Check the node time.
1307 02c521e4 Iustin Pop

1308 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1309 02c521e4 Iustin Pop
    @param ninfo: the node to check
1310 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1311 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1312 02c521e4 Iustin Pop

1313 02c521e4 Iustin Pop
    """
1314 02c521e4 Iustin Pop
    if vg_name is None:
1315 02c521e4 Iustin Pop
      return
1316 02c521e4 Iustin Pop
1317 02c521e4 Iustin Pop
    node = ninfo.name
1318 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1319 02c521e4 Iustin Pop
1320 02c521e4 Iustin Pop
    # checks vg existence and size > 20G
1321 02c521e4 Iustin Pop
    vglist = nresult.get(constants.NV_VGLIST, None)
1322 02c521e4 Iustin Pop
    test = not vglist
1323 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1324 02c521e4 Iustin Pop
    if not test:
1325 02c521e4 Iustin Pop
      vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1326 02c521e4 Iustin Pop
                                            constants.MIN_VG_SIZE)
1327 02c521e4 Iustin Pop
      _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1328 02c521e4 Iustin Pop
1329 02c521e4 Iustin Pop
    # check pv names
1330 02c521e4 Iustin Pop
    pvlist = nresult.get(constants.NV_PVLIST, None)
1331 02c521e4 Iustin Pop
    test = pvlist is None
1332 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODELVM, node, "Can't get PV list from node")
1333 a0c9776a Iustin Pop
    if not test:
1334 02c521e4 Iustin Pop
      # check that ':' is not present in PV names, since it's a
1335 02c521e4 Iustin Pop
      # special character for lvcreate (denotes the range of PEs to
1336 02c521e4 Iustin Pop
      # use on the PV)
1337 02c521e4 Iustin Pop
      for _, pvname, owner_vg in pvlist:
1338 02c521e4 Iustin Pop
        test = ":" in pvname
1339 02c521e4 Iustin Pop
        _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
1340 02c521e4 Iustin Pop
                 " '%s' of VG '%s'", pvname, owner_vg)
1341 02c521e4 Iustin Pop
1342 02c521e4 Iustin Pop
  def _VerifyNodeNetwork(self, ninfo, nresult):
1343 02c521e4 Iustin Pop
    """Check the node time.
1344 02c521e4 Iustin Pop

1345 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1346 02c521e4 Iustin Pop
    @param ninfo: the node to check
1347 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1348 02c521e4 Iustin Pop

1349 02c521e4 Iustin Pop
    """
1350 02c521e4 Iustin Pop
    node = ninfo.name
1351 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1352 02c521e4 Iustin Pop
1353 02c521e4 Iustin Pop
    test = constants.NV_NODELIST not in nresult
1354 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODESSH, node,
1355 a0c9776a Iustin Pop
             "node hasn't returned node ssh connectivity data")
1356 a0c9776a Iustin Pop
    if not test:
1357 02c521e4 Iustin Pop
      if nresult[constants.NV_NODELIST]:
1358 02c521e4 Iustin Pop
        for a_node, a_msg in nresult[constants.NV_NODELIST].items():
1359 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODESSH, node,
1360 a0c9776a Iustin Pop
                   "ssh communication with node '%s': %s", a_node, a_msg)
1361 25361b9a Iustin Pop
1362 02c521e4 Iustin Pop
    test = constants.NV_NODENETTEST not in nresult
1363 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODENET, node,
1364 a0c9776a Iustin Pop
             "node hasn't returned node tcp connectivity data")
1365 a0c9776a Iustin Pop
    if not test:
1366 02c521e4 Iustin Pop
      if nresult[constants.NV_NODENETTEST]:
1367 02c521e4 Iustin Pop
        nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
1368 7c874ee1 Iustin Pop
        for anode in nlist:
1369 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODENET, node,
1370 a0c9776a Iustin Pop
                   "tcp communication with node '%s': %s",
1371 02c521e4 Iustin Pop
                   anode, nresult[constants.NV_NODENETTEST][anode])
1372 a8083063 Iustin Pop
1373 02c521e4 Iustin Pop
  def _VerifyInstance(self, instance, instanceconfig, node_image):
1374 a8083063 Iustin Pop
    """Verify an instance.
1375 a8083063 Iustin Pop

1376 a8083063 Iustin Pop
    This function checks to see if the required block devices are
1377 a8083063 Iustin Pop
    available on the instance's node.
1378 a8083063 Iustin Pop

1379 a8083063 Iustin Pop
    """
1380 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1381 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
1382 a8083063 Iustin Pop
1383 a8083063 Iustin Pop
    node_vol_should = {}
1384 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
1385 a8083063 Iustin Pop
1386 a8083063 Iustin Pop
    for node in node_vol_should:
1387 02c521e4 Iustin Pop
      n_img = node_image[node]
1388 02c521e4 Iustin Pop
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1389 02c521e4 Iustin Pop
        # ignore missing volumes on offline or broken nodes
1390 0a66c968 Iustin Pop
        continue
1391 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
1392 02c521e4 Iustin Pop
        test = volume not in n_img.volumes
1393 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEMISSINGDISK, instance,
1394 a0c9776a Iustin Pop
                 "volume %s missing on node %s", volume, node)
1395 a8083063 Iustin Pop
1396 0d68c45d Iustin Pop
    if instanceconfig.admin_up:
1397 02c521e4 Iustin Pop
      pri_img = node_image[node_current]
1398 02c521e4 Iustin Pop
      test = instance not in pri_img.instances and not pri_img.offline
1399 a0c9776a Iustin Pop
      _ErrorIf(test, self.EINSTANCEDOWN, instance,
1400 a0c9776a Iustin Pop
               "instance not running on its primary node %s",
1401 a0c9776a Iustin Pop
               node_current)
1402 a8083063 Iustin Pop
1403 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1404 a8083063 Iustin Pop
      if node != node_current:
1405 02c521e4 Iustin Pop
        test = instance in n_img.instances
1406 a0c9776a Iustin Pop
        _ErrorIf(test, self.EINSTANCEWRONGNODE, instance,
1407 a0c9776a Iustin Pop
                 "instance should not run on node %s", node)
1408 a8083063 Iustin Pop
1409 02c521e4 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_image):
1410 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
1411 a8083063 Iustin Pop

1412 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
1413 a8083063 Iustin Pop
    reported as unknown.
1414 a8083063 Iustin Pop

1415 a8083063 Iustin Pop
    """
1416 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1417 02c521e4 Iustin Pop
      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
1418 02c521e4 Iustin Pop
        # skip non-healthy nodes
1419 02c521e4 Iustin Pop
        continue
1420 02c521e4 Iustin Pop
      for volume in n_img.volumes:
1421 a0c9776a Iustin Pop
        test = (node not in node_vol_should or
1422 a0c9776a Iustin Pop
                volume not in node_vol_should[node])
1423 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANLV, node,
1424 7c874ee1 Iustin Pop
                      "volume %s is unknown", volume)
1425 a8083063 Iustin Pop
1426 02c521e4 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_image):
1427 a8083063 Iustin Pop
    """Verify the list of running instances.
1428 a8083063 Iustin Pop

1429 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
1430 a8083063 Iustin Pop

1431 a8083063 Iustin Pop
    """
1432 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1433 02c521e4 Iustin Pop
      for o_inst in n_img.instances:
1434 a0c9776a Iustin Pop
        test = o_inst not in instancelist
1435 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEORPHANINSTANCE, node,
1436 7c874ee1 Iustin Pop
                      "instance %s on node %s should not exist", o_inst, node)
1437 a8083063 Iustin Pop
1438 02c521e4 Iustin Pop
  def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
1439 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
1440 2b3b6ddd Guido Trotter

1441 02c521e4 Iustin Pop
    Check that if one single node dies we can still start all the
1442 02c521e4 Iustin Pop
    instances it was primary for.
1443 2b3b6ddd Guido Trotter

1444 2b3b6ddd Guido Trotter
    """
1445 02c521e4 Iustin Pop
    for node, n_img in node_image.items():
1446 02c521e4 Iustin Pop
      # This code checks that every node which is now listed as
1447 02c521e4 Iustin Pop
      # secondary has enough memory to host all instances it is
1448 02c521e4 Iustin Pop
      # supposed to should a single other node in the cluster fail.
1449 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
1450 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
1451 02c521e4 Iustin Pop
      # WARNING: we currently take into account down instances as well
1452 02c521e4 Iustin Pop
      # as up ones, considering that even if they're down someone
1453 02c521e4 Iustin Pop
      # might want to start them even in the event of a node failure.
1454 02c521e4 Iustin Pop
      for prinode, instances in n_img.sbp.items():
1455 2b3b6ddd Guido Trotter
        needed_mem = 0
1456 2b3b6ddd Guido Trotter
        for instance in instances:
1457 338e51e8 Iustin Pop
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
1458 c0f2b229 Iustin Pop
          if bep[constants.BE_AUTO_BALANCE]:
1459 3924700f Iustin Pop
            needed_mem += bep[constants.BE_MEMORY]
1460 02c521e4 Iustin Pop
        test = n_img.mfree < needed_mem
1461 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEN1, node,
1462 7c874ee1 Iustin Pop
                      "not enough memory on to accommodate"
1463 7c874ee1 Iustin Pop
                      " failovers should peer node %s fail", prinode)
1464 2b3b6ddd Guido Trotter
1465 02c521e4 Iustin Pop
  def _VerifyNodeFiles(self, ninfo, nresult, file_list, local_cksum,
1466 02c521e4 Iustin Pop
                       master_files):
1467 02c521e4 Iustin Pop
    """Verifies and computes the node required file checksums.
1468 02c521e4 Iustin Pop

1469 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1470 02c521e4 Iustin Pop
    @param ninfo: the node to check
1471 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1472 02c521e4 Iustin Pop
    @param file_list: required list of files
1473 02c521e4 Iustin Pop
    @param local_cksum: dictionary of local files and their checksums
1474 02c521e4 Iustin Pop
    @param master_files: list of files that only masters should have
1475 02c521e4 Iustin Pop

1476 02c521e4 Iustin Pop
    """
1477 02c521e4 Iustin Pop
    node = ninfo.name
1478 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1479 02c521e4 Iustin Pop
1480 02c521e4 Iustin Pop
    remote_cksum = nresult.get(constants.NV_FILELIST, None)
1481 02c521e4 Iustin Pop
    test = not isinstance(remote_cksum, dict)
1482 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEFILECHECK, node,
1483 02c521e4 Iustin Pop
             "node hasn't returned file checksum data")
1484 02c521e4 Iustin Pop
    if test:
1485 02c521e4 Iustin Pop
      return
1486 02c521e4 Iustin Pop
1487 02c521e4 Iustin Pop
    for file_name in file_list:
1488 02c521e4 Iustin Pop
      node_is_mc = ninfo.master_candidate
1489 02c521e4 Iustin Pop
      must_have = (file_name not in master_files) or node_is_mc
1490 02c521e4 Iustin Pop
      # missing
1491 02c521e4 Iustin Pop
      test1 = file_name not in remote_cksum
1492 02c521e4 Iustin Pop
      # invalid checksum
1493 02c521e4 Iustin Pop
      test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1494 02c521e4 Iustin Pop
      # existing and good
1495 02c521e4 Iustin Pop
      test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1496 02c521e4 Iustin Pop
      _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1497 02c521e4 Iustin Pop
               "file '%s' missing", file_name)
1498 02c521e4 Iustin Pop
      _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1499 02c521e4 Iustin Pop
               "file '%s' has wrong checksum", file_name)
1500 02c521e4 Iustin Pop
      # not candidate and this is not a must-have file
1501 02c521e4 Iustin Pop
      _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1502 02c521e4 Iustin Pop
               "file '%s' should not exist on non master"
1503 02c521e4 Iustin Pop
               " candidates (and the file is outdated)", file_name)
1504 02c521e4 Iustin Pop
      # all good, except non-master/non-must have combination
1505 02c521e4 Iustin Pop
      _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1506 02c521e4 Iustin Pop
               "file '%s' should not exist"
1507 02c521e4 Iustin Pop
               " on non master candidates", file_name)
1508 02c521e4 Iustin Pop
1509 02c521e4 Iustin Pop
  def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, drbd_map):
1510 02c521e4 Iustin Pop
    """Verifies and the node DRBD status.
1511 02c521e4 Iustin Pop

1512 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1513 02c521e4 Iustin Pop
    @param ninfo: the node to check
1514 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1515 02c521e4 Iustin Pop
    @param instanceinfo: the dict of instances
1516 02c521e4 Iustin Pop
    @param drbd_map: the DRBD map as returned by
1517 02c521e4 Iustin Pop
        L{ganeti.config.ConfigWriter.ComputeDRBDMap}
1518 02c521e4 Iustin Pop

1519 02c521e4 Iustin Pop
    """
1520 02c521e4 Iustin Pop
    node = ninfo.name
1521 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1522 02c521e4 Iustin Pop
1523 02c521e4 Iustin Pop
    # compute the DRBD minors
1524 02c521e4 Iustin Pop
    node_drbd = {}
1525 02c521e4 Iustin Pop
    for minor, instance in drbd_map[node].items():
1526 02c521e4 Iustin Pop
      test = instance not in instanceinfo
1527 02c521e4 Iustin Pop
      _ErrorIf(test, self.ECLUSTERCFG, None,
1528 02c521e4 Iustin Pop
               "ghost instance '%s' in temporary DRBD map", instance)
1529 02c521e4 Iustin Pop
      # ghost instance should not be running, but otherwise we
1530 02c521e4 Iustin Pop
      # don't give double warnings (both ghost instance and
1531 02c521e4 Iustin Pop
      # unallocated minor in use)
1532 02c521e4 Iustin Pop
      if test:
1533 02c521e4 Iustin Pop
        node_drbd[minor] = (instance, False)
1534 02c521e4 Iustin Pop
      else:
1535 02c521e4 Iustin Pop
        instance = instanceinfo[instance]
1536 02c521e4 Iustin Pop
        node_drbd[minor] = (instance.name, instance.admin_up)
1537 02c521e4 Iustin Pop
1538 02c521e4 Iustin Pop
    # and now check them
1539 02c521e4 Iustin Pop
    used_minors = nresult.get(constants.NV_DRBDLIST, [])
1540 02c521e4 Iustin Pop
    test = not isinstance(used_minors, (tuple, list))
1541 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEDRBD, node,
1542 02c521e4 Iustin Pop
             "cannot parse drbd status file: %s", str(used_minors))
1543 02c521e4 Iustin Pop
    if test:
1544 02c521e4 Iustin Pop
      # we cannot check drbd status
1545 02c521e4 Iustin Pop
      return
1546 02c521e4 Iustin Pop
1547 02c521e4 Iustin Pop
    for minor, (iname, must_exist) in node_drbd.items():
1548 02c521e4 Iustin Pop
      test = minor not in used_minors and must_exist
1549 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1550 02c521e4 Iustin Pop
               "drbd minor %d of instance %s is not active", minor, iname)
1551 02c521e4 Iustin Pop
    for minor in used_minors:
1552 02c521e4 Iustin Pop
      test = minor not in node_drbd
1553 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1554 02c521e4 Iustin Pop
               "unallocated drbd minor %d is in use", minor)
1555 02c521e4 Iustin Pop
1556 02c521e4 Iustin Pop
  def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
1557 02c521e4 Iustin Pop
    """Verifies and updates the node volume data.
1558 02c521e4 Iustin Pop

1559 02c521e4 Iustin Pop
    This function will update a L{NodeImage}'s internal structures
1560 02c521e4 Iustin Pop
    with data from the remote call.
1561 02c521e4 Iustin Pop

1562 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1563 02c521e4 Iustin Pop
    @param ninfo: the node to check
1564 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1565 02c521e4 Iustin Pop
    @param nimg: the node image object
1566 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1567 02c521e4 Iustin Pop

1568 02c521e4 Iustin Pop
    """
1569 02c521e4 Iustin Pop
    node = ninfo.name
1570 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1571 02c521e4 Iustin Pop
1572 02c521e4 Iustin Pop
    nimg.lvm_fail = True
1573 02c521e4 Iustin Pop
    lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1574 02c521e4 Iustin Pop
    if vg_name is None:
1575 02c521e4 Iustin Pop
      pass
1576 02c521e4 Iustin Pop
    elif isinstance(lvdata, basestring):
1577 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
1578 02c521e4 Iustin Pop
               utils.SafeEncode(lvdata))
1579 02c521e4 Iustin Pop
    elif not isinstance(lvdata, dict):
1580 02c521e4 Iustin Pop
      _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
1581 02c521e4 Iustin Pop
    else:
1582 02c521e4 Iustin Pop
      nimg.volumes = lvdata
1583 02c521e4 Iustin Pop
      nimg.lvm_fail = False
1584 02c521e4 Iustin Pop
1585 02c521e4 Iustin Pop
  def _UpdateNodeInstances(self, ninfo, nresult, nimg):
1586 02c521e4 Iustin Pop
    """Verifies and updates the node instance list.
1587 02c521e4 Iustin Pop

1588 02c521e4 Iustin Pop
    If the listing was successful, then updates this node's instance
1589 02c521e4 Iustin Pop
    list. Otherwise, it marks the RPC call as failed for the instance
1590 02c521e4 Iustin Pop
    list key.
1591 02c521e4 Iustin Pop

1592 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1593 02c521e4 Iustin Pop
    @param ninfo: the node to check
1594 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1595 02c521e4 Iustin Pop
    @param nimg: the node image object
1596 02c521e4 Iustin Pop

1597 02c521e4 Iustin Pop
    """
1598 02c521e4 Iustin Pop
    idata = nresult.get(constants.NV_INSTANCELIST, None)
1599 02c521e4 Iustin Pop
    test = not isinstance(idata, list)
1600 02c521e4 Iustin Pop
    self._ErrorIf(test, self.ENODEHV, ninfo.name, "rpc call to node failed"
1601 02c521e4 Iustin Pop
                  " (instancelist): %s", utils.SafeEncode(str(idata)))
1602 02c521e4 Iustin Pop
    if test:
1603 02c521e4 Iustin Pop
      nimg.hyp_fail = True
1604 02c521e4 Iustin Pop
    else:
1605 02c521e4 Iustin Pop
      nimg.instances = idata
1606 02c521e4 Iustin Pop
1607 02c521e4 Iustin Pop
  def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
1608 02c521e4 Iustin Pop
    """Verifies and computes a node information map
1609 02c521e4 Iustin Pop

1610 02c521e4 Iustin Pop
    @type ninfo: L{objects.Node}
1611 02c521e4 Iustin Pop
    @param ninfo: the node to check
1612 02c521e4 Iustin Pop
    @param nresult: the remote results for the node
1613 02c521e4 Iustin Pop
    @param nimg: the node image object
1614 02c521e4 Iustin Pop
    @param vg_name: the configured VG name
1615 02c521e4 Iustin Pop

1616 02c521e4 Iustin Pop
    """
1617 02c521e4 Iustin Pop
    node = ninfo.name
1618 02c521e4 Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1619 02c521e4 Iustin Pop
1620 02c521e4 Iustin Pop
    # try to read free memory (from the hypervisor)
1621 02c521e4 Iustin Pop
    hv_info = nresult.get(constants.NV_HVINFO, None)
1622 02c521e4 Iustin Pop
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
1623 02c521e4 Iustin Pop
    _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
1624 02c521e4 Iustin Pop
    if not test:
1625 02c521e4 Iustin Pop
      try:
1626 02c521e4 Iustin Pop
        nimg.mfree = int(hv_info["memory_free"])
1627 02c521e4 Iustin Pop
      except (ValueError, TypeError):
1628 02c521e4 Iustin Pop
        _ErrorIf(True, self.ENODERPC, node,
1629 02c521e4 Iustin Pop
                 "node returned invalid nodeinfo, check hypervisor")
1630 02c521e4 Iustin Pop
1631 02c521e4 Iustin Pop
    # FIXME: devise a free space model for file based instances as well
1632 02c521e4 Iustin Pop
    if vg_name is not None:
1633 02c521e4 Iustin Pop
      test = (constants.NV_VGLIST not in nresult or
1634 02c521e4 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST])
1635 02c521e4 Iustin Pop
      _ErrorIf(test, self.ENODELVM, node,
1636 02c521e4 Iustin Pop
               "node didn't return data for the volume group '%s'"
1637 02c521e4 Iustin Pop
               " - it is either missing or broken", vg_name)
1638 02c521e4 Iustin Pop
      if not test:
1639 02c521e4 Iustin Pop
        try:
1640 02c521e4 Iustin Pop
          nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
1641 02c521e4 Iustin Pop
        except (ValueError, TypeError):
1642 02c521e4 Iustin Pop
          _ErrorIf(True, self.ENODERPC, node,
1643 02c521e4 Iustin Pop
                   "node returned invalid LVM info, check LVM status")
1644 02c521e4 Iustin Pop
1645 a8083063 Iustin Pop
  def CheckPrereq(self):
1646 a8083063 Iustin Pop
    """Check prerequisites.
1647 a8083063 Iustin Pop

1648 e54c4c5e Guido Trotter
    Transform the list of checks we're going to skip into a set and check that
1649 e54c4c5e Guido Trotter
    all its members are valid.
1650 a8083063 Iustin Pop

1651 a8083063 Iustin Pop
    """
1652 e54c4c5e Guido Trotter
    self.skip_set = frozenset(self.op.skip_checks)
1653 e54c4c5e Guido Trotter
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
1654 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid checks to be skipped specified",
1655 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
1656 a8083063 Iustin Pop
1657 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
1658 d8fff41c Guido Trotter
    """Build hooks env.
1659 d8fff41c Guido Trotter

1660 5bbd3f7f Michael Hanselmann
    Cluster-Verify hooks are run only in the post phase; any hook failure is
1661 d8fff41c Guido Trotter
    logged in the verify output and causes the verification to fail.
1662 d8fff41c Guido Trotter

1663 d8fff41c Guido Trotter
    """
1664 d8fff41c Guido Trotter
    all_nodes = self.cfg.GetNodeList()
1665 35e994e9 Iustin Pop
    env = {
1666 35e994e9 Iustin Pop
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
1667 35e994e9 Iustin Pop
      }
1668 35e994e9 Iustin Pop
    for node in self.cfg.GetAllNodesInfo().values():
1669 35e994e9 Iustin Pop
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
1670 35e994e9 Iustin Pop
1671 d8fff41c Guido Trotter
    return env, [], all_nodes
1672 d8fff41c Guido Trotter
1673 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1674 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
1675 a8083063 Iustin Pop

1676 a8083063 Iustin Pop
    """
1677 a0c9776a Iustin Pop
    self.bad = False
1678 7260cfbe Iustin Pop
    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
1679 7c874ee1 Iustin Pop
    verbose = self.op.verbose
1680 7c874ee1 Iustin Pop
    self._feedback_fn = feedback_fn
1681 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
1682 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
1683 a0c9776a Iustin Pop
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)
1684 a8083063 Iustin Pop
1685 b98bf262 Michael Hanselmann
    # Check the cluster certificates
1686 b98bf262 Michael Hanselmann
    for cert_filename in constants.ALL_CERT_FILES:
1687 b98bf262 Michael Hanselmann
      (errcode, msg) = _VerifyCertificate(cert_filename)
1688 b98bf262 Michael Hanselmann
      _ErrorIf(errcode, self.ECLUSTERCERT, None, msg, code=errcode)
1689 b98bf262 Michael Hanselmann
1690 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
1691 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
1692 58385fad Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1693 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
1694 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
1695 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
1696 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
1697 6d2e83d5 Iustin Pop
                        for iname in instancelist)
1698 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
1699 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
1700 02c521e4 Iustin Pop
    n_offline = 0 # Count of offline nodes
1701 02c521e4 Iustin Pop
    n_drained = 0 # Count of nodes being drained
1702 02c521e4 Iustin Pop
    node_vol_should = {}
1703 a8083063 Iustin Pop
1704 a8083063 Iustin Pop
    # FIXME: verify OS list
1705 a8083063 Iustin Pop
    # do local checksums
1706 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1707 112f18a5 Iustin Pop
1708 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1709 d3100055 Michael Hanselmann
    file_names.extend(constants.ALL_CERT_FILES)
1710 112f18a5 Iustin Pop
    file_names.extend(master_files)
1711 58385fad Iustin Pop
    if cluster.modify_etc_hosts:
1712 58385fad Iustin Pop
      file_names.append(constants.ETC_HOSTS)
1713 112f18a5 Iustin Pop
1714 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1715 a8083063 Iustin Pop
1716 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1717 a8083063 Iustin Pop
    node_verify_param = {
1718 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1719 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1720 82e37788 Iustin Pop
                              if not node.offline],
1721 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1722 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1723 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1724 82e37788 Iustin Pop
                                 if not node.offline],
1725 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1726 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1727 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1728 7c0aa8e9 Iustin Pop
      constants.NV_NODESETUP: None,
1729 313b2dd4 Michael Hanselmann
      constants.NV_TIME: None,
1730 a8083063 Iustin Pop
      }
1731 313b2dd4 Michael Hanselmann
1732 cc9e1230 Guido Trotter
    if vg_name is not None:
1733 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1734 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1735 d091393e Iustin Pop
      node_verify_param[constants.NV_PVLIST] = [vg_name]
1736 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1737 313b2dd4 Michael Hanselmann
1738 02c521e4 Iustin Pop
    # Build our expected cluster state
1739 02c521e4 Iustin Pop
    node_image = dict((node.name, self.NodeImage(offline=node.offline))
1740 02c521e4 Iustin Pop
                      for node in nodeinfo)
1741 02c521e4 Iustin Pop
1742 02c521e4 Iustin Pop
    for instance in instancelist:
1743 02c521e4 Iustin Pop
      inst_config = instanceinfo[instance]
1744 02c521e4 Iustin Pop
1745 02c521e4 Iustin Pop
      for nname in inst_config.all_nodes:
1746 02c521e4 Iustin Pop
        if nname not in node_image:
1747 02c521e4 Iustin Pop
          # ghost node
1748 02c521e4 Iustin Pop
          gnode = self.NodeImage()
1749 02c521e4 Iustin Pop
          gnode.ghost = True
1750 02c521e4 Iustin Pop
          node_image[nname] = gnode
1751 02c521e4 Iustin Pop
1752 02c521e4 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1753 02c521e4 Iustin Pop
1754 02c521e4 Iustin Pop
      pnode = inst_config.primary_node
1755 02c521e4 Iustin Pop
      node_image[pnode].pinst.append(instance)
1756 02c521e4 Iustin Pop
1757 02c521e4 Iustin Pop
      for snode in inst_config.secondary_nodes:
1758 02c521e4 Iustin Pop
        nimg = node_image[snode]
1759 02c521e4 Iustin Pop
        nimg.sinst.append(instance)
1760 02c521e4 Iustin Pop
        if pnode not in nimg.sbp:
1761 02c521e4 Iustin Pop
          nimg.sbp[pnode] = []
1762 02c521e4 Iustin Pop
        nimg.sbp[pnode].append(instance)
1763 02c521e4 Iustin Pop
1764 02c521e4 Iustin Pop
    # At this point, we have the in-memory data structures complete,
1765 02c521e4 Iustin Pop
    # except for the runtime information, which we'll gather next
1766 02c521e4 Iustin Pop
1767 313b2dd4 Michael Hanselmann
    # Due to the way our RPC system works, exact response times cannot be
1768 313b2dd4 Michael Hanselmann
    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
1769 313b2dd4 Michael Hanselmann
    # time before and after executing the request, we can at least have a time
1770 313b2dd4 Michael Hanselmann
    # window.
1771 313b2dd4 Michael Hanselmann
    nvinfo_starttime = time.time()
1772 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1773 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1774 313b2dd4 Michael Hanselmann
    nvinfo_endtime = time.time()
1775 a8083063 Iustin Pop
1776 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1777 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1778 6d2e83d5 Iustin Pop
1779 7c874ee1 Iustin Pop
    feedback_fn("* Verifying node status")
1780 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1781 112f18a5 Iustin Pop
      node = node_i.name
1782 02c521e4 Iustin Pop
      nimg = node_image[node]
1783 25361b9a Iustin Pop
1784 0a66c968 Iustin Pop
      if node_i.offline:
1785 7c874ee1 Iustin Pop
        if verbose:
1786 7c874ee1 Iustin Pop
          feedback_fn("* Skipping offline node %s" % (node,))
1787 02c521e4 Iustin Pop
        n_offline += 1
1788 0a66c968 Iustin Pop
        continue
1789 0a66c968 Iustin Pop
1790 112f18a5 Iustin Pop
      if node == master_node:
1791 25361b9a Iustin Pop
        ntype = "master"
1792 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1793 25361b9a Iustin Pop
        ntype = "master candidate"
1794 22f0f71d Iustin Pop
      elif node_i.drained:
1795 22f0f71d Iustin Pop
        ntype = "drained"
1796 02c521e4 Iustin Pop
        n_drained += 1
1797 112f18a5 Iustin Pop
      else:
1798 25361b9a Iustin Pop
        ntype = "regular"
1799 7c874ee1 Iustin Pop
      if verbose:
1800 7c874ee1 Iustin Pop
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1801 25361b9a Iustin Pop
1802 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1803 a0c9776a Iustin Pop
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
1804 6f68a739 Iustin Pop
      if msg:
1805 02c521e4 Iustin Pop
        nimg.rpc_fail = True
1806 25361b9a Iustin Pop
        continue
1807 25361b9a Iustin Pop
1808 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1809 a8083063 Iustin Pop
1810 02c521e4 Iustin Pop
      nimg.call_ok = self._VerifyNode(node_i, nresult)
1811 02c521e4 Iustin Pop
      self._VerifyNodeNetwork(node_i, nresult)
1812 02c521e4 Iustin Pop
      self._VerifyNodeLVM(node_i, nresult, vg_name)
1813 02c521e4 Iustin Pop
      self._VerifyNodeFiles(node_i, nresult, file_names, local_checksums,
1814 02c521e4 Iustin Pop
                            master_files)
1815 02c521e4 Iustin Pop
      self._VerifyNodeDrbd(node_i, nresult, instanceinfo, all_drbd_map)
1816 02c521e4 Iustin Pop
      self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
1817 a8083063 Iustin Pop
1818 02c521e4 Iustin Pop
      self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
1819 02c521e4 Iustin Pop
      self._UpdateNodeInstances(node_i, nresult, nimg)
1820 02c521e4 Iustin Pop
      self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
1821 a8083063 Iustin Pop
1822 7c874ee1 Iustin Pop
    feedback_fn("* Verifying instance status")
1823 a8083063 Iustin Pop
    for instance in instancelist:
1824 7c874ee1 Iustin Pop
      if verbose:
1825 7c874ee1 Iustin Pop
        feedback_fn("* Verifying instance %s" % instance)
1826 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1827 02c521e4 Iustin Pop
      self._VerifyInstance(instance, inst_config, node_image)
1828 832261fd Iustin Pop
      inst_nodes_offline = []
1829 a8083063 Iustin Pop
1830 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1831 02c521e4 Iustin Pop
      pnode_img = node_image[pnode]
1832 02c521e4 Iustin Pop
      _ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
1833 a0c9776a Iustin Pop
               self.ENODERPC, pnode, "instance %s, connection to"
1834 a0c9776a Iustin Pop
               " primary node failed", instance)
1835 93e4c50b Guido Trotter
1836 02c521e4 Iustin Pop
      if pnode_img.offline:
1837 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1838 832261fd Iustin Pop
1839 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1840 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1841 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1842 93e4c50b Guido Trotter
      # supported either.
1843 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1844 02c521e4 Iustin Pop
      if not inst_config.secondary_nodes:
1845 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1846 02c521e4 Iustin Pop
      _ErrorIf(len(inst_config.secondary_nodes) > 1, self.EINSTANCELAYOUT,
1847 02c521e4 Iustin Pop
               instance, "instance has multiple secondary nodes: %s",
1848 02c521e4 Iustin Pop
               utils.CommaJoin(inst_config.secondary_nodes),
1849 02c521e4 Iustin Pop
               code=self.ETYPE_WARNING)
1850 93e4c50b Guido Trotter
1851 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1852 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1853 3924700f Iustin Pop
1854 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1855 02c521e4 Iustin Pop
        s_img = node_image[snode]
1856 02c521e4 Iustin Pop
        _ErrorIf(s_img.rpc_fail and not s_img.offline, self.ENODERPC, snode,
1857 02c521e4 Iustin Pop
                 "instance %s, connection to secondary node failed", instance)
1858 02c521e4 Iustin Pop
1859 02c521e4 Iustin Pop
        if s_img.offline:
1860 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1861 832261fd Iustin Pop
1862 a0c9776a Iustin Pop
      # warn that the instance lives on offline nodes
1863 a0c9776a Iustin Pop
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
1864 a0c9776a Iustin Pop
               "instance lives on offline node(s) %s",
1865 1f864b60 Iustin Pop
               utils.CommaJoin(inst_nodes_offline))
1866 02c521e4 Iustin Pop
      # ... or ghost nodes
1867 02c521e4 Iustin Pop
      for node in inst_config.all_nodes:
1868 02c521e4 Iustin Pop
        _ErrorIf(node_image[node].ghost, self.EINSTANCEBADNODE, instance,
1869 02c521e4 Iustin Pop
                 "instance lives on ghost node %s", node)
1870 93e4c50b Guido Trotter
1871 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1872 02c521e4 Iustin Pop
    self._VerifyOrphanVolumes(node_vol_should, node_image)
1873 a8083063 Iustin Pop
1874 02c521e4 Iustin Pop
    feedback_fn("* Verifying oprhan instances")
1875 02c521e4 Iustin Pop
    self._VerifyOrphanInstances(instancelist, node_image)
1876 a8083063 Iustin Pop
1877 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1878 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1879 02c521e4 Iustin Pop
      self._VerifyNPlusOneMemory(node_image, instanceinfo)
1880 2b3b6ddd Guido Trotter
1881 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1882 2b3b6ddd Guido Trotter
    if i_non_redundant:
1883 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1884 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1885 2b3b6ddd Guido Trotter
1886 3924700f Iustin Pop
    if i_non_a_balanced:
1887 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1888 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1889 3924700f Iustin Pop
1890 0a66c968 Iustin Pop
    if n_offline:
1891 02c521e4 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % n_offline)
1892 0a66c968 Iustin Pop
1893 22f0f71d Iustin Pop
    if n_drained:
1894 02c521e4 Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % n_drained)
1895 22f0f71d Iustin Pop
1896 a0c9776a Iustin Pop
    return not self.bad
1897 a8083063 Iustin Pop
1898 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1899 5bbd3f7f Michael Hanselmann
    """Analyze the post-hooks' result
1900 e4376078 Iustin Pop

1901 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
1902 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
1903 d8fff41c Guido Trotter

1904 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1905 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1906 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
1907 e4376078 Iustin Pop
    @param feedback_fn: function used to send feedback back to the caller
1908 e4376078 Iustin Pop
    @param lu_result: previous Exec result
1909 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
1910 e4376078 Iustin Pop
        and hook results
1911 d8fff41c Guido Trotter

1912 d8fff41c Guido Trotter
    """
1913 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
1914 38206f3c Iustin Pop
    # their results
1915 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
1916 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
1917 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
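      # '^' in multi-line mode matches the start of every line, so the sub()
      # call further down prefixes each line of a script's output with six
      # spaces, e.g. (hypothetical output) "line1\nline2" becomes
      # "      line1\n      line2"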
1918 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
1919 7c874ee1 Iustin Pop
      assert hooks_results, "invalid result from hooks"
1920 7c874ee1 Iustin Pop
1921 7c874ee1 Iustin Pop
      for node_name in hooks_results:
1922 7c874ee1 Iustin Pop
        res = hooks_results[node_name]
1923 7c874ee1 Iustin Pop
        msg = res.fail_msg
1924 a0c9776a Iustin Pop
        test = msg and not res.offline
1925 a0c9776a Iustin Pop
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
1926 7c874ee1 Iustin Pop
                      "Communication failure in hooks execution: %s", msg)
1927 dd9e9f9c Michael Hanselmann
        if res.offline or msg:
1928 dd9e9f9c Michael Hanselmann
          # No need to investigate payload if node is offline or gave an error.
1929 a0c9776a Iustin Pop
          # override manually lu_result here as _ErrorIf only
1930 a0c9776a Iustin Pop
          # overrides self.bad
1931 7c874ee1 Iustin Pop
          lu_result = 1
1932 7c874ee1 Iustin Pop
          continue
1933 7c874ee1 Iustin Pop
        for script, hkr, output in res.payload:
1934 a0c9776a Iustin Pop
          test = hkr == constants.HKR_FAIL
1935 a0c9776a Iustin Pop
          self._ErrorIf(test, self.ENODEHOOKS, node_name,
1936 7c874ee1 Iustin Pop
                        "Script %s failed, output:", script)
1937 a0c9776a Iustin Pop
          if test:
1938 7c874ee1 Iustin Pop
            output = indent_re.sub('      ', output)
1939 7c874ee1 Iustin Pop
            feedback_fn("%s" % output)
1940 6d7b472a Iustin Pop
            lu_result = 0
1941 d8fff41c Guido Trotter
1942 d8fff41c Guido Trotter
      return lu_result
1943 d8fff41c Guido Trotter
1944 a8083063 Iustin Pop
1945 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1946 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1947 2c95a8d4 Iustin Pop

1948 2c95a8d4 Iustin Pop
  """
1949 2c95a8d4 Iustin Pop
  _OP_REQP = []
1950 d4b9d97f Guido Trotter
  REQ_BGL = False
1951 d4b9d97f Guido Trotter
1952 d4b9d97f Guido Trotter
  def ExpandNames(self):
1953 d4b9d97f Guido Trotter
    self.needed_locks = {
1954 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1955 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1956 d4b9d97f Guido Trotter
    }
1957 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
1958 2c95a8d4 Iustin Pop
1959 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1960 2c95a8d4 Iustin Pop
    """Check prerequisites.
1961 2c95a8d4 Iustin Pop

1962 2c95a8d4 Iustin Pop
    This has no prerequisites.
1963 2c95a8d4 Iustin Pop

1964 2c95a8d4 Iustin Pop
    """
1965 2c95a8d4 Iustin Pop
    pass
1966 2c95a8d4 Iustin Pop
1967 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1968 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1969 2c95a8d4 Iustin Pop

1970 29d376ec Iustin Pop
    @rtype: tuple of three items
1971 29d376ec Iustin Pop
    @return: a tuple of (dict of node-to-node_error, list of instances
1972 29d376ec Iustin Pop
        which need activate-disks, dict of instance: (node, volume) for
1973 29d376ec Iustin Pop
        missing volumes)
1974 29d376ec Iustin Pop

1975 2c95a8d4 Iustin Pop
    """
1976 29d376ec Iustin Pop
    result = res_nodes, res_instances, res_missing = {}, [], {}
1977 2c95a8d4 Iustin Pop
1978 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1979 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1980 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1981 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1982 2c95a8d4 Iustin Pop
1983 2c95a8d4 Iustin Pop
    nv_dict = {}
1984 2c95a8d4 Iustin Pop
    for inst in instances:
1985 2c95a8d4 Iustin Pop
      inst_lvs = {}
1986 0d68c45d Iustin Pop
      if (not inst.admin_up or
1987 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1988 2c95a8d4 Iustin Pop
        continue
1989 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1990 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
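      # e.g. (hypothetical data) {"inst1": {"node1": ["xenvg/lv1"]}} becomes
      # {("node1", "xenvg/lv1"): <inst1>}, so each (node, volume) pair found
      # later can be mapped back to its instance with a single lookup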
1991 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1992 2c95a8d4 Iustin Pop
        for vol in vol_list:
1993 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1994 2c95a8d4 Iustin Pop
1995 2c95a8d4 Iustin Pop
    if not nv_dict:
1996 2c95a8d4 Iustin Pop
      return result
1997 2c95a8d4 Iustin Pop
1998 b2a6ccd4 Iustin Pop
    node_lvs = self.rpc.call_lv_list(nodes, vg_name)
1999 2c95a8d4 Iustin Pop
2000 2c95a8d4 Iustin Pop
    for node in nodes:
2001 2c95a8d4 Iustin Pop
      # node_volume
2002 29d376ec Iustin Pop
      node_res = node_lvs[node]
2003 29d376ec Iustin Pop
      if node_res.offline:
2004 ea9ddc07 Iustin Pop
        continue
2005 4c4e4e1e Iustin Pop
      msg = node_res.fail_msg
2006 29d376ec Iustin Pop
      if msg:
2007 29d376ec Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
2008 29d376ec Iustin Pop
        res_nodes[node] = msg
2009 2c95a8d4 Iustin Pop
        continue
2010 2c95a8d4 Iustin Pop
2011 29d376ec Iustin Pop
      lvs = node_res.payload
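      # each payload value is a tuple whose last element is the LV's online
      # flag; the first two fields are not needed here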
2012 1122eb25 Iustin Pop
      for lv_name, (_, _, lv_online) in lvs.items():
2013 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
2014 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
2015 b63ed789 Iustin Pop
            and inst.name not in res_instances):
2016 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
2017 2c95a8d4 Iustin Pop
2018 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
2019 b63ed789 Iustin Pop
    # data better
2020 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
2021 b63ed789 Iustin Pop
      if inst.name not in res_missing:
2022 b63ed789 Iustin Pop
        res_missing[inst.name] = []
2023 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
2024 b63ed789 Iustin Pop
2025 2c95a8d4 Iustin Pop
    return result
2026 2c95a8d4 Iustin Pop
2027 2c95a8d4 Iustin Pop
2028 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
2029 60975797 Iustin Pop
  """Verifies the cluster disks sizes.
2030 60975797 Iustin Pop

2031 60975797 Iustin Pop
  """
2032 60975797 Iustin Pop
  _OP_REQP = ["instances"]
2033 60975797 Iustin Pop
  REQ_BGL = False
2034 60975797 Iustin Pop
2035 60975797 Iustin Pop
  def ExpandNames(self):
2036 60975797 Iustin Pop
    if not isinstance(self.op.instances, list):
2037 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
2038 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2039 60975797 Iustin Pop
2040 60975797 Iustin Pop
    if self.op.instances:
2041 60975797 Iustin Pop
      self.wanted_names = []
2042 60975797 Iustin Pop
      for name in self.op.instances:
2043 cf26a87a Iustin Pop
        full_name = _ExpandInstanceName(self.cfg, name)
2044 60975797 Iustin Pop
        self.wanted_names.append(full_name)
2045 60975797 Iustin Pop
      self.needed_locks = {
2046 60975797 Iustin Pop
        locking.LEVEL_NODE: [],
2047 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: self.wanted_names,
2048 60975797 Iustin Pop
        }
2049 60975797 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2050 60975797 Iustin Pop
    else:
2051 60975797 Iustin Pop
      self.wanted_names = None
2052 60975797 Iustin Pop
      self.needed_locks = {
2053 60975797 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
2054 60975797 Iustin Pop
        locking.LEVEL_INSTANCE: locking.ALL_SET,
2055 60975797 Iustin Pop
        }
2056 60975797 Iustin Pop
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
2057 60975797 Iustin Pop
2058 60975797 Iustin Pop
  def DeclareLocks(self, level):
2059 60975797 Iustin Pop
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
2060 60975797 Iustin Pop
      self._LockInstancesNodes(primary_only=True)
2061 60975797 Iustin Pop
2062 60975797 Iustin Pop
  def CheckPrereq(self):
2063 60975797 Iustin Pop
    """Check prerequisites.
2064 60975797 Iustin Pop

2065 60975797 Iustin Pop
    This only checks the optional instance list against the existing names.
2066 60975797 Iustin Pop

2067 60975797 Iustin Pop
    """
2068 60975797 Iustin Pop
    if self.wanted_names is None:
2069 60975797 Iustin Pop
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
2070 60975797 Iustin Pop
2071 60975797 Iustin Pop
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
2072 60975797 Iustin Pop
                             in self.wanted_names]
2073 60975797 Iustin Pop
2074 b775c337 Iustin Pop
  def _EnsureChildSizes(self, disk):
2075 b775c337 Iustin Pop
    """Ensure children of the disk have the needed disk size.
2076 b775c337 Iustin Pop

2077 b775c337 Iustin Pop
    This is valid mainly for DRBD8 and fixes an issue where the
2078 b775c337 Iustin Pop
    children have smaller disk size.
2079 b775c337 Iustin Pop

2080 b775c337 Iustin Pop
    @param disk: an L{ganeti.objects.Disk} object
2081 b775c337 Iustin Pop

2082 b775c337 Iustin Pop
    """
2083 b775c337 Iustin Pop
    if disk.dev_type == constants.LD_DRBD8:
2084 b775c337 Iustin Pop
      assert disk.children, "Empty children for DRBD8?"
2085 b775c337 Iustin Pop
      fchild = disk.children[0]
2086 b775c337 Iustin Pop
      mismatch = fchild.size < disk.size
2087 b775c337 Iustin Pop
      if mismatch:
2088 b775c337 Iustin Pop
        self.LogInfo("Child disk has size %d, parent %d, fixing",
2089 b775c337 Iustin Pop
                     fchild.size, disk.size)
2090 b775c337 Iustin Pop
        fchild.size = disk.size
2091 b775c337 Iustin Pop
2092 b775c337 Iustin Pop
      # and we recurse on this child only, not on the metadev
2093 b775c337 Iustin Pop
      return self._EnsureChildSizes(fchild) or mismatch
2094 b775c337 Iustin Pop
    else:
2095 b775c337 Iustin Pop
      return False
2096 b775c337 Iustin Pop
2097 60975797 Iustin Pop
  def Exec(self, feedback_fn):
2098 60975797 Iustin Pop
    """Verify the size of cluster disks.
2099 60975797 Iustin Pop

2100 60975797 Iustin Pop
    """
2101 60975797 Iustin Pop
    # TODO: check child disks too
2102 60975797 Iustin Pop
    # TODO: check differences in size between primary/secondary nodes
2103 60975797 Iustin Pop
    per_node_disks = {}
2104 60975797 Iustin Pop
    for instance in self.wanted_instances:
2105 60975797 Iustin Pop
      pnode = instance.primary_node
2106 60975797 Iustin Pop
      if pnode not in per_node_disks:
2107 60975797 Iustin Pop
        per_node_disks[pnode] = []
2108 60975797 Iustin Pop
      for idx, disk in enumerate(instance.disks):
2109 60975797 Iustin Pop
        per_node_disks[pnode].append((instance, idx, disk))
2110 60975797 Iustin Pop
2111 60975797 Iustin Pop
    changed = []
2112 60975797 Iustin Pop
    for node, dskl in per_node_disks.items():
2113 4d9e6835 Iustin Pop
      newl = [v[2].Copy() for v in dskl]
2114 4d9e6835 Iustin Pop
      for dsk in newl:
2115 4d9e6835 Iustin Pop
        self.cfg.SetDiskID(dsk, node)
2116 4d9e6835 Iustin Pop
      result = self.rpc.call_blockdev_getsizes(node, newl)
2117 3cebe102 Michael Hanselmann
      if result.fail_msg:
2118 60975797 Iustin Pop
        self.LogWarning("Failure in blockdev_getsizes call to node"
2119 60975797 Iustin Pop
                        " %s, ignoring", node)
2120 60975797 Iustin Pop
        continue
2121 60975797 Iustin Pop
      if len(result.payload) != len(dskl):
2122 60975797 Iustin Pop
        self.LogWarning("Invalid result from node %s, ignoring node results",
2123 60975797 Iustin Pop
                        node)
2124 60975797 Iustin Pop
        continue
2125 60975797 Iustin Pop
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
2126 60975797 Iustin Pop
        if size is None:
2127 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return size"
2128 60975797 Iustin Pop
                          " information, ignoring", idx, instance.name)
2129 60975797 Iustin Pop
          continue
2130 60975797 Iustin Pop
        if not isinstance(size, (int, long)):
2131 60975797 Iustin Pop
          self.LogWarning("Disk %d of instance %s did not return valid"
2132 60975797 Iustin Pop
                          " size information, ignoring", idx, instance.name)
2133 60975797 Iustin Pop
          continue
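        # the RPC reports sizes in bytes while disk.size is stored in
        # mebibytes, hence the shift by 20 bits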
2134 60975797 Iustin Pop
        size = size >> 20
2135 60975797 Iustin Pop
        if size != disk.size:
2136 60975797 Iustin Pop
          self.LogInfo("Disk %d of instance %s has mismatched size,"
2137 60975797 Iustin Pop
                       " correcting: recorded %d, actual %d", idx,
2138 60975797 Iustin Pop
                       instance.name, disk.size, size)
2139 60975797 Iustin Pop
          disk.size = size
2140 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
2141 60975797 Iustin Pop
          changed.append((instance.name, idx, size))
2142 b775c337 Iustin Pop
        if self._EnsureChildSizes(disk):
2143 a4eae71f Michael Hanselmann
          self.cfg.Update(instance, feedback_fn)
2144 b775c337 Iustin Pop
          changed.append((instance.name, idx, disk.size))
2145 60975797 Iustin Pop
    return changed
2146 60975797 Iustin Pop
2147 60975797 Iustin Pop
2148 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
2149 07bd8a51 Iustin Pop
  """Rename the cluster.
2150 07bd8a51 Iustin Pop

2151 07bd8a51 Iustin Pop
  """
2152 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
2153 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
2154 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
2155 07bd8a51 Iustin Pop
2156 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
2157 07bd8a51 Iustin Pop
    """Build hooks env.
2158 07bd8a51 Iustin Pop

2159 07bd8a51 Iustin Pop
    """
2160 07bd8a51 Iustin Pop
    env = {
2161 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
2162 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
2163 07bd8a51 Iustin Pop
      }
2164 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
2165 47a72f18 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2166 47a72f18 Iustin Pop
    return env, [mn], all_nodes
2167 07bd8a51 Iustin Pop
2168 07bd8a51 Iustin Pop
  def CheckPrereq(self):
2169 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
2170 07bd8a51 Iustin Pop

2171 07bd8a51 Iustin Pop
    """
2172 104f4ca1 Iustin Pop
    hostname = utils.GetHostInfo(self.op.name)
2173 07bd8a51 Iustin Pop
2174 bcf043c9 Iustin Pop
    new_name = hostname.name
2175 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
2176 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
2177 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
2178 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
2179 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
2180 5c983ee5 Iustin Pop
                                 " cluster has changed",
2181 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2182 07bd8a51 Iustin Pop
    if new_ip != old_ip:
2183 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
2184 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
2185 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
2186 5c983ee5 Iustin Pop
                                   new_ip, errors.ECODE_NOTUNIQUE)
2187 07bd8a51 Iustin Pop
2188 07bd8a51 Iustin Pop
    self.op.name = new_name
2189 07bd8a51 Iustin Pop
2190 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
2191 07bd8a51 Iustin Pop
    """Rename the cluster.
2192 07bd8a51 Iustin Pop

2193 07bd8a51 Iustin Pop
    """
2194 07bd8a51 Iustin Pop
    clustername = self.op.name
2195 07bd8a51 Iustin Pop
    ip = self.ip
2196 07bd8a51 Iustin Pop
2197 07bd8a51 Iustin Pop
    # shutdown the master IP
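    # the master role has to be stopped while the name/IP change is written
    # out; the finally clause below restarts it even if the rename fails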
2198 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
2199 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
2200 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
2201 07bd8a51 Iustin Pop
2202 07bd8a51 Iustin Pop
    try:
2203 55cf7d83 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
2204 55cf7d83 Iustin Pop
      cluster.cluster_name = clustername
2205 55cf7d83 Iustin Pop
      cluster.master_ip = ip
2206 a4eae71f Michael Hanselmann
      self.cfg.Update(cluster, feedback_fn)
2207 ec85e3d5 Iustin Pop
2208 ec85e3d5 Iustin Pop
      # update the known hosts file
2209 ec85e3d5 Iustin Pop
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
2210 ec85e3d5 Iustin Pop
      node_list = self.cfg.GetNodeList()
2211 ec85e3d5 Iustin Pop
      try:
2212 ec85e3d5 Iustin Pop
        node_list.remove(master)
2213 ec85e3d5 Iustin Pop
      except ValueError:
2214 ec85e3d5 Iustin Pop
        pass
2215 ec85e3d5 Iustin Pop
      result = self.rpc.call_upload_file(node_list,
2216 ec85e3d5 Iustin Pop
                                         constants.SSH_KNOWN_HOSTS_FILE)
2217 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
2218 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2219 6f7d4e75 Iustin Pop
        if msg:
2220 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2221 6f7d4e75 Iustin Pop
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
2222 6f7d4e75 Iustin Pop
          self.proc.LogWarning(msg)
2223 ec85e3d5 Iustin Pop
2224 07bd8a51 Iustin Pop
    finally:
2225 3583908a Guido Trotter
      result = self.rpc.call_node_start_master(master, False, False)
2226 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2227 b726aff0 Iustin Pop
      if msg:
2228 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
2229 b726aff0 Iustin Pop
                        " the master, please restart manually: %s", msg)
2230 07bd8a51 Iustin Pop
2231 07bd8a51 Iustin Pop
2232 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
2233 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
2234 8084f9f6 Manuel Franceschini

2235 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
2236 e4376078 Iustin Pop
  @param disk: the disk to check
2237 5bbd3f7f Michael Hanselmann
  @rtype: boolean
2238 e4376078 Iustin Pop
  @return: boolean indicating whether an LD_LV dev_type was found or not
2239 8084f9f6 Manuel Franceschini

2240 8084f9f6 Manuel Franceschini
  """
2241 8084f9f6 Manuel Franceschini
  if disk.children:
2242 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
2243 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
2244 8084f9f6 Manuel Franceschini
        return True
2245 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
2246 8084f9f6 Manuel Franceschini
2247 8084f9f6 Manuel Franceschini
2248 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
2249 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
2250 8084f9f6 Manuel Franceschini

2251 8084f9f6 Manuel Franceschini
  """
2252 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
2253 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
2254 8084f9f6 Manuel Franceschini
  _OP_REQP = []
2255 c53279cf Guido Trotter
  REQ_BGL = False
2256 c53279cf Guido Trotter
2257 3994f455 Iustin Pop
  def CheckArguments(self):
2258 4b7735f9 Iustin Pop
    """Check parameters
2259 4b7735f9 Iustin Pop

2260 4b7735f9 Iustin Pop
    """
2261 96d1a0c5 Iustin Pop
    for attr in ["candidate_pool_size",
2262 96d1a0c5 Iustin Pop
                 "uid_pool", "add_uids", "remove_uids"]:
2263 96d1a0c5 Iustin Pop
      if not hasattr(self.op, attr):
2264 96d1a0c5 Iustin Pop
        setattr(self.op, attr, None)
2265 96d1a0c5 Iustin Pop
2266 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
2267 4b7735f9 Iustin Pop
      try:
2268 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
2269 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
2270 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
2271 5c983ee5 Iustin Pop
                                   str(err), errors.ECODE_INVAL)
2272 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
2273 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed",
2274 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2275 1338f2b4 Balazs Lecz
2276 3953242f Iustin Pop
    _CheckBooleanOpField(self.op, "maintain_node_health")
2277 4b7735f9 Iustin Pop
2278 1338f2b4 Balazs Lecz
    if self.op.uid_pool:
2279 1338f2b4 Balazs Lecz
      uidpool.CheckUidPool(self.op.uid_pool)
2280 1338f2b4 Balazs Lecz
2281 fdad8c4d Balazs Lecz
    if self.op.add_uids:
2282 fdad8c4d Balazs Lecz
      uidpool.CheckUidPool(self.op.add_uids)
2283 fdad8c4d Balazs Lecz
2284 fdad8c4d Balazs Lecz
    if self.op.remove_uids:
2285 fdad8c4d Balazs Lecz
      uidpool.CheckUidPool(self.op.remove_uids)
2286 fdad8c4d Balazs Lecz
2287 c53279cf Guido Trotter
  def ExpandNames(self):
2288 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
2289 c53279cf Guido Trotter
    # all nodes to be modified.
2290 c53279cf Guido Trotter
    self.needed_locks = {
2291 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
2292 c53279cf Guido Trotter
    }
2293 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2294 8084f9f6 Manuel Franceschini
2295 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
2296 8084f9f6 Manuel Franceschini
    """Build hooks env.
2297 8084f9f6 Manuel Franceschini

2298 8084f9f6 Manuel Franceschini
    """
2299 8084f9f6 Manuel Franceschini
    env = {
2300 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
2301 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
2302 8084f9f6 Manuel Franceschini
      }
2303 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
2304 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
2305 8084f9f6 Manuel Franceschini
2306 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
2307 8084f9f6 Manuel Franceschini
    """Check prerequisites.
2308 8084f9f6 Manuel Franceschini

2309 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
2310 5f83e263 Iustin Pop
    if the given volume group is valid.
2311 8084f9f6 Manuel Franceschini

2312 8084f9f6 Manuel Franceschini
    """
2313 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
2314 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
2315 8084f9f6 Manuel Franceschini
      for inst in instances:
2316 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
2317 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
2318 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
2319 5c983ee5 Iustin Pop
                                       " lvm-based instances exist",
2320 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
2321 8084f9f6 Manuel Franceschini
2322 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
2323 779c15bb Iustin Pop
2324 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
2325 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
2326 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
2327 8084f9f6 Manuel Franceschini
      for node in node_list:
2328 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
2329 e480923b Iustin Pop
        if msg:
2330 781de953 Iustin Pop
          # ignoring down node
2331 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
2332 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
2333 781de953 Iustin Pop
          continue
2334 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
2335 781de953 Iustin Pop
                                              self.op.vg_name,
2336 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
2337 8084f9f6 Manuel Franceschini
        if vgstatus:
2338 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
2339 5c983ee5 Iustin Pop
                                     (node, vgstatus), errors.ECODE_ENVIRON)
2340 8084f9f6 Manuel Franceschini
2341 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
2342 5af3da74 Guido Trotter
    # validate params changes
2343 779c15bb Iustin Pop
    if self.op.beparams:
2344 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
2345 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
2346 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
2347 779c15bb Iustin Pop
2348 5af3da74 Guido Trotter
    if self.op.nicparams:
2349 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
2350 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
2351 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
2352 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
2353 90b704a1 Guido Trotter
      nic_errors = []
2354 90b704a1 Guido Trotter
2355 90b704a1 Guido Trotter
      # check all instances for consistency
2356 90b704a1 Guido Trotter
      for instance in self.cfg.GetAllInstancesInfo().values():
2357 90b704a1 Guido Trotter
        for nic_idx, nic in enumerate(instance.nics):
2358 90b704a1 Guido Trotter
          params_copy = copy.deepcopy(nic.nicparams)
2359 90b704a1 Guido Trotter
          params_filled = objects.FillDict(self.new_nicparams, params_copy)
2360 90b704a1 Guido Trotter
2361 90b704a1 Guido Trotter
          # check parameter syntax
2362 90b704a1 Guido Trotter
          try:
2363 90b704a1 Guido Trotter
            objects.NIC.CheckParameterSyntax(params_filled)
2364 90b704a1 Guido Trotter
          except errors.ConfigurationError, err:
2365 90b704a1 Guido Trotter
            nic_errors.append("Instance %s, nic/%d: %s" %
2366 90b704a1 Guido Trotter
                              (instance.name, nic_idx, err))
2367 90b704a1 Guido Trotter
2368 90b704a1 Guido Trotter
          # if we're moving instances to routed, check that they have an ip
2369 90b704a1 Guido Trotter
          target_mode = params_filled[constants.NIC_MODE]
2370 90b704a1 Guido Trotter
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
2371 90b704a1 Guido Trotter
            nic_errors.append("Instance %s, nic/%d: routed nick with no ip" %
2372 90b704a1 Guido Trotter
                              (instance.name, nic_idx))
2373 90b704a1 Guido Trotter
      if nic_errors:
2374 90b704a1 Guido Trotter
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
2375 90b704a1 Guido Trotter
                                   "\n".join(nic_errors))
2376 5af3da74 Guido Trotter
2377 779c15bb Iustin Pop
    # hypervisor list/parameters
2378 9f3ac970 Iustin Pop
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
2379 779c15bb Iustin Pop
    if self.op.hvparams:
2380 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
2381 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input",
2382 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2383 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
2384 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
2385 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
2386 779c15bb Iustin Pop
        else:
2387 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
2388 779c15bb Iustin Pop
2389 17463d22 Renรฉ Nussbaumer
    # os hypervisor parameters
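    # os_hvp is a nested mapping of {os_name: {hypervisor: {param: value}}},
    # e.g. (illustrative values only):
    #   {"debian-lenny": {"xen-pvm": {"kernel_path": "/boot/vmlinuz-custom"}}}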
2390 17463d22 Renรฉ Nussbaumer
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
2391 17463d22 Renรฉ Nussbaumer
    if self.op.os_hvp:
2392 17463d22 Renรฉ Nussbaumer
      if not isinstance(self.op.os_hvp, dict):
2393 17463d22 Renรฉ Nussbaumer
        raise errors.OpPrereqError("Invalid 'os_hvp' parameter on input",
2394 17463d22 Renรฉ Nussbaumer
                                   errors.ECODE_INVAL)
2395 17463d22 Renรฉ Nussbaumer
      for os_name, hvs in self.op.os_hvp.items():
2396 17463d22 Renรฉ Nussbaumer
        if not isinstance(hvs, dict):
2397 17463d22 Renรฉ Nussbaumer
          raise errors.OpPrereqError(("Invalid 'os_hvp' parameter on"
2398 17463d22 Renรฉ Nussbaumer
                                      " input"), errors.ECODE_INVAL)
2399 17463d22 Renรฉ Nussbaumer
        if os_name not in self.new_os_hvp:
2400 17463d22 Renรฉ Nussbaumer
          self.new_os_hvp[os_name] = hvs
2401 17463d22 Renรฉ Nussbaumer
        else:
2402 17463d22 Renรฉ Nussbaumer
          for hv_name, hv_dict in hvs.items():
2403 17463d22 Renรฉ Nussbaumer
            if hv_name not in self.new_os_hvp[os_name]:
2404 17463d22 Renรฉ Nussbaumer
              self.new_os_hvp[os_name][hv_name] = hv_dict
2405 17463d22 Renรฉ Nussbaumer
            else:
2406 17463d22 Renรฉ Nussbaumer
              self.new_os_hvp[os_name][hv_name].update(hv_dict)
2407 17463d22 Renรฉ Nussbaumer
2408 9f3ac970 Iustin Pop
    # changes to the hypervisor list
2409 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
2410 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
2411 b119bccb Guido Trotter
      if not self.hv_list:
2412 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
2413 5c983ee5 Iustin Pop
                                   " least one member",
2414 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2415 b119bccb Guido Trotter
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
2416 b119bccb Guido Trotter
      if invalid_hvs:
2417 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
2418 ab3e6da8 Iustin Pop
                                   " entries: %s" %
2419 ab3e6da8 Iustin Pop
                                   utils.CommaJoin(invalid_hvs),
2420 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2421 9f3ac970 Iustin Pop
      for hv in self.hv_list:
2422 9f3ac970 Iustin Pop
        # if the hypervisor doesn't already exist in the cluster
2423 9f3ac970 Iustin Pop
        # hvparams, we initialize it to empty, and then (in both
2424 9f3ac970 Iustin Pop
        # cases) we make sure to fill the defaults, as we might not
2425 9f3ac970 Iustin Pop
        # have a complete defaults list if the hypervisor wasn't
2426 9f3ac970 Iustin Pop
        # enabled before
2427 9f3ac970 Iustin Pop
        if hv not in new_hvp:
2428 9f3ac970 Iustin Pop
          new_hvp[hv] = {}
2429 9f3ac970 Iustin Pop
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
2430 9f3ac970 Iustin Pop
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
2431 779c15bb Iustin Pop
    else:
2432 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
2433 779c15bb Iustin Pop
2434 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
2435 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
2436 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
2437 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
2438 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
2439 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
2440 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
2441 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
2442 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2443 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
2444 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
2445 779c15bb Iustin Pop
2446 cced4c39 Iustin Pop
    if self.op.os_hvp:
2447 cced4c39 Iustin Pop
      # no need to check any newly-enabled hypervisors, since the
2448 cced4c39 Iustin Pop
      # defaults have already been checked in the above code-block
2449 cced4c39 Iustin Pop
      for os_name, os_hvp in self.new_os_hvp.items():
2450 cced4c39 Iustin Pop
        for hv_name, hv_params in os_hvp.items():
2451 cced4c39 Iustin Pop
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
2452 cced4c39 Iustin Pop
          # we need to fill in the new os_hvp on top of the actual hv_p
2453 cced4c39 Iustin Pop
          cluster_defaults = self.new_hvparams.get(hv_name, {})
2454 cced4c39 Iustin Pop
          new_osp = objects.FillDict(cluster_defaults, hv_params)
2455 cced4c39 Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
2456 cced4c39 Iustin Pop
          hv_class.CheckParameterSyntax(new_osp)
2457 cced4c39 Iustin Pop
          _CheckHVParams(self, node_list, hv_name, new_osp)
2458 cced4c39 Iustin Pop
2459 cced4c39 Iustin Pop
2460 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
2461 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
2462 8084f9f6 Manuel Franceschini

2463 8084f9f6 Manuel Franceschini
    """
2464 779c15bb Iustin Pop
    if self.op.vg_name is not None:
2465 b2482333 Guido Trotter
      new_volume = self.op.vg_name
2466 b2482333 Guido Trotter
      if not new_volume:
2467 b2482333 Guido Trotter
        new_volume = None
2468 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
2469 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
2470 779c15bb Iustin Pop
      else:
2471 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
2472 779c15bb Iustin Pop
                    " state, not changing")
2473 779c15bb Iustin Pop
    if self.op.hvparams:
2474 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
2475 17463d22 Renรฉ Nussbaumer
    if self.op.os_hvp:
2476 17463d22 Renรฉ Nussbaumer
      self.cluster.os_hvp = self.new_os_hvp
2477 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
2478 9f3ac970 Iustin Pop
      self.cluster.hvparams = self.new_hvparams
2479 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
2480 779c15bb Iustin Pop
    if self.op.beparams:
2481 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
2482 5af3da74 Guido Trotter
    if self.op.nicparams:
2483 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
2484 5af3da74 Guido Trotter
2485 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
2486 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
2487 75e914fb Iustin Pop
      # we need to update the pool size here, otherwise the save will fail
2488 44485f49 Guido Trotter
      _AdjustCandidatePool(self, [])
2489 4b7735f9 Iustin Pop
2490 3953242f Iustin Pop
    if self.op.maintain_node_health is not None:
2491 3953242f Iustin Pop
      self.cluster.maintain_node_health = self.op.maintain_node_health
2492 3953242f Iustin Pop
2493 fdad8c4d Balazs Lecz
    if self.op.add_uids is not None:
2494 fdad8c4d Balazs Lecz
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
2495 fdad8c4d Balazs Lecz
2496 fdad8c4d Balazs Lecz
    if self.op.remove_uids is not None:
2497 fdad8c4d Balazs Lecz
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
2498 fdad8c4d Balazs Lecz
2499 1338f2b4 Balazs Lecz
    if self.op.uid_pool is not None:
2500 1338f2b4 Balazs Lecz
      self.cluster.uid_pool = self.op.uid_pool
2501 1338f2b4 Balazs Lecz
2502 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cluster, feedback_fn)
2503 8084f9f6 Manuel Franceschini
2504 8084f9f6 Manuel Franceschini
2505 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
2506 28eddce5 Guido Trotter
  """Distribute additional files which are part of the cluster configuration.
2507 28eddce5 Guido Trotter

2508 28eddce5 Guido Trotter
  ConfigWriter takes care of distributing the config and ssconf files, but
2509 28eddce5 Guido Trotter
  there are more files which should be distributed to all nodes. This function
2510 28eddce5 Guido Trotter
  makes sure those are copied.
2511 28eddce5 Guido Trotter

2512 28eddce5 Guido Trotter
  @param lu: calling logical unit
2513 28eddce5 Guido Trotter
  @param additional_nodes: list of nodes not in the config to distribute to
2514 28eddce5 Guido Trotter

2515 28eddce5 Guido Trotter
  """
2516 28eddce5 Guido Trotter
  # 1. Gather target nodes
2517 28eddce5 Guido Trotter
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
2518 6819dc49 Iustin Pop
  dist_nodes = lu.cfg.GetOnlineNodeList()
2519 28eddce5 Guido Trotter
  if additional_nodes is not None:
2520 28eddce5 Guido Trotter
    dist_nodes.extend(additional_nodes)
2521 28eddce5 Guido Trotter
  if myself.name in dist_nodes:
2522 28eddce5 Guido Trotter
    dist_nodes.remove(myself.name)
2523 a4eae71f Michael Hanselmann
2524 28eddce5 Guido Trotter
  # 2. Gather files to distribute
2525 28eddce5 Guido Trotter
  dist_files = set([constants.ETC_HOSTS,
2526 28eddce5 Guido Trotter
                    constants.SSH_KNOWN_HOSTS_FILE,
2527 28eddce5 Guido Trotter
                    constants.RAPI_CERT_FILE,
2528 28eddce5 Guido Trotter
                    constants.RAPI_USERS_FILE,
2529 6b7d5878 Michael Hanselmann
                    constants.CONFD_HMAC_KEY,
2530 28eddce5 Guido Trotter
                   ])
2531 e1b8653f Guido Trotter
2532 e1b8653f Guido Trotter
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
2533 e1b8653f Guido Trotter
  for hv_name in enabled_hypervisors:
2534 e1b8653f Guido Trotter
    hv_class = hypervisor.GetHypervisor(hv_name)
2535 e1b8653f Guido Trotter
    dist_files.update(hv_class.GetAncillaryFiles())
2536 e1b8653f Guido Trotter
2537 28eddce5 Guido Trotter
  # 3. Perform the files upload
2538 28eddce5 Guido Trotter
  for fname in dist_files:
2539 28eddce5 Guido Trotter
    if os.path.exists(fname):
2540 28eddce5 Guido Trotter
      result = lu.rpc.call_upload_file(dist_nodes, fname)
2541 28eddce5 Guido Trotter
      for to_node, to_result in result.items():
2542 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
2543 6f7d4e75 Iustin Pop
        if msg:
2544 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
2545 6f7d4e75 Iustin Pop
                 (fname, to_node, msg))
2546 6f7d4e75 Iustin Pop
          lu.proc.LogWarning(msg)
2547 28eddce5 Guido Trotter
2548 28eddce5 Guido Trotter
2549 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
2550 afee0879 Iustin Pop
  """Force the redistribution of cluster configuration.
2551 afee0879 Iustin Pop

2552 afee0879 Iustin Pop
  This is a very simple LU.
2553 afee0879 Iustin Pop

2554 afee0879 Iustin Pop
  """
2555 afee0879 Iustin Pop
  _OP_REQP = []
2556 afee0879 Iustin Pop
  REQ_BGL = False
2557 afee0879 Iustin Pop
2558 afee0879 Iustin Pop
  def ExpandNames(self):
2559 afee0879 Iustin Pop
    self.needed_locks = {
2560 afee0879 Iustin Pop
      locking.LEVEL_NODE: locking.ALL_SET,
2561 afee0879 Iustin Pop
    }
2562 afee0879 Iustin Pop
    self.share_locks[locking.LEVEL_NODE] = 1
2563 afee0879 Iustin Pop
2564 afee0879 Iustin Pop
  def CheckPrereq(self):
2565 afee0879 Iustin Pop
    """Check prerequisites.
2566 afee0879 Iustin Pop

2567 afee0879 Iustin Pop
    """
2568 afee0879 Iustin Pop
2569 afee0879 Iustin Pop
  def Exec(self, feedback_fn):
2570 afee0879 Iustin Pop
    """Redistribute the configuration.
2571 afee0879 Iustin Pop

2572 afee0879 Iustin Pop
    """
2573 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
2574 28eddce5 Guido Trotter
    _RedistributeAncillaryFiles(self)
2575 afee0879 Iustin Pop
2576 afee0879 Iustin Pop
2577 b6c07b79 Michael Hanselmann
def _WaitForSync(lu, instance, oneshot=False):
2578 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
2579 a8083063 Iustin Pop

2580 a8083063 Iustin Pop
  """
2581 a8083063 Iustin Pop
  if not instance.disks:
2582 a8083063 Iustin Pop
    return True
2583 a8083063 Iustin Pop
2584 a8083063 Iustin Pop
  if not oneshot:
2585 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
2586 a8083063 Iustin Pop
2587 a8083063 Iustin Pop
  node = instance.primary_node
2588 a8083063 Iustin Pop
2589 a8083063 Iustin Pop
  for dev in instance.disks:
2590 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
2591 a8083063 Iustin Pop
2592 6bcb1446 Michael Hanselmann
  # TODO: Convert to utils.Retry
2593 6bcb1446 Michael Hanselmann
2594 a8083063 Iustin Pop
  retries = 0
2595 fbafd7a8 Iustin Pop
  degr_retries = 10 # in seconds, as we sleep 1 second each time
2596 a8083063 Iustin Pop
  while True:
2597 a8083063 Iustin Pop
    max_time = 0
2598 a8083063 Iustin Pop
    done = True
2599 a8083063 Iustin Pop
    cumul_degraded = False
2600 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
2601 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2602 3efa9051 Iustin Pop
    if msg:
2603 3efa9051 Iustin Pop
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
2604 a8083063 Iustin Pop
      retries += 1
2605 a8083063 Iustin Pop
      if retries >= 10:
2606 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
2607 3ecf6786 Iustin Pop
                                 " aborting." % node)
2608 a8083063 Iustin Pop
      time.sleep(6)
2609 a8083063 Iustin Pop
      continue
2610 3efa9051 Iustin Pop
    rstats = rstats.payload
2611 a8083063 Iustin Pop
    retries = 0
2612 1492cca7 Iustin Pop
    for i, mstat in enumerate(rstats):
2613 a8083063 Iustin Pop
      if mstat is None:
2614 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
2615 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
2616 a8083063 Iustin Pop
        continue
2617 36145b12 Michael Hanselmann
2618 36145b12 Michael Hanselmann
      cumul_degraded = (cumul_degraded or
2619 36145b12 Michael Hanselmann
                        (mstat.is_degraded and mstat.sync_percent is None))
2620 36145b12 Michael Hanselmann
      if mstat.sync_percent is not None:
2621 a8083063 Iustin Pop
        done = False
2622 36145b12 Michael Hanselmann
        if mstat.estimated_time is not None:
2623 36145b12 Michael Hanselmann
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
2624 36145b12 Michael Hanselmann
          max_time = mstat.estimated_time
2625 a8083063 Iustin Pop
        else:
2626 a8083063 Iustin Pop
          rem_time = "no time estimate"
2627 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
2628 4d4a651d Michael Hanselmann
                        (instance.disks[i].iv_name, mstat.sync_percent,
2629 4d4a651d Michael Hanselmann
                         rem_time))
2630 fbafd7a8 Iustin Pop
2631 fbafd7a8 Iustin Pop
    # if we're done but degraded, let's do a few small retries, to
2632 fbafd7a8 Iustin Pop
    # make sure we see a stable and not transient situation; therefore
2633 fbafd7a8 Iustin Pop
    # we force restart of the loop
2634 fbafd7a8 Iustin Pop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
2635 fbafd7a8 Iustin Pop
      logging.info("Degraded disks found, %d retries left", degr_retries)
2636 fbafd7a8 Iustin Pop
      degr_retries -= 1
2637 fbafd7a8 Iustin Pop
      time.sleep(1)
2638 fbafd7a8 Iustin Pop
      continue
2639 fbafd7a8 Iustin Pop
2640 a8083063 Iustin Pop
    if done or oneshot:
2641 a8083063 Iustin Pop
      break
2642 a8083063 Iustin Pop
2643 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
2644 a8083063 Iustin Pop
2645 a8083063 Iustin Pop
  if done:
2646 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
2647 a8083063 Iustin Pop
  return not cumul_degraded
2648 a8083063 Iustin Pop
2649 a8083063 Iustin Pop
2650 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
2651 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
2652 a8083063 Iustin Pop

2653 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
2654 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
2655 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
2656 0834c866 Iustin Pop

2657 a8083063 Iustin Pop
  """
2658 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
2659 a8083063 Iustin Pop
2660 a8083063 Iustin Pop
  result = True
2661 96acbc09 Michael Hanselmann
2662 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
2663 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
2664 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
2665 23829f6f Iustin Pop
    if msg:
2666 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
2667 23829f6f Iustin Pop
      result = False
2668 23829f6f Iustin Pop
    elif not rstats.payload:
2669 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
2670 a8083063 Iustin Pop
      result = False
2671 a8083063 Iustin Pop
    else:
2672 96acbc09 Michael Hanselmann
      if ldisk:
2673 f208978a Michael Hanselmann
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
2674 96acbc09 Michael Hanselmann
      else:
2675 96acbc09 Michael Hanselmann
        result = result and not rstats.payload.is_degraded
2676 96acbc09 Michael Hanselmann
2677 a8083063 Iustin Pop
  if dev.children:
2678 a8083063 Iustin Pop
    for child in dev.children:
2679 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
2680 a8083063 Iustin Pop
2681 a8083063 Iustin Pop
  return result
2682 a8083063 Iustin Pop
2683 a8083063 Iustin Pop
2684 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
2685 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
2686 a8083063 Iustin Pop

2687 a8083063 Iustin Pop
  """
2688 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2689 6bf01bbb Guido Trotter
  REQ_BGL = False
2690 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
2691 1e288a26 Guido Trotter
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
2692 1e288a26 Guido Trotter
  # Fields that need calculation of global os validity
2693 1e288a26 Guido Trotter
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])
2694 a8083063 Iustin Pop
2695 6bf01bbb Guido Trotter
  def ExpandNames(self):
2696 1f9430d6 Iustin Pop
    if self.op.names:
2697 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported",
2698 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2699 1f9430d6 Iustin Pop
2700 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2701 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2702 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
2703 1f9430d6 Iustin Pop
2704 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
2705 a6ab004b Iustin Pop
    # Temporary removal of locks, should be reverted later
2706 a6ab004b Iustin Pop
    # TODO: reintroduce locks when they are lighter-weight
2707 6bf01bbb Guido Trotter
    self.needed_locks = {}
2708 a6ab004b Iustin Pop
    #self.share_locks[locking.LEVEL_NODE] = 1
2709 a6ab004b Iustin Pop
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2710 6bf01bbb Guido Trotter
2711 6bf01bbb Guido Trotter
  def CheckPrereq(self):
2712 6bf01bbb Guido Trotter
    """Check prerequisites.
2713 6bf01bbb Guido Trotter

2714 6bf01bbb Guido Trotter
    """
2715 6bf01bbb Guido Trotter
2716 1f9430d6 Iustin Pop
  @staticmethod
2717 857121ad Iustin Pop
  def _DiagnoseByOS(rlist):
2718 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
2719 1f9430d6 Iustin Pop

2720 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
2721 1f9430d6 Iustin Pop

2722 e4376078 Iustin Pop
    @rtype: dict
2723 5fcc718f Iustin Pop
    @return: a dictionary with osnames as keys and as value another map, with
2724 255dcebd Iustin Pop
        nodes as keys and tuples of (path, status, diagnose, variants)
        as values, eg::
2725 e4376078 Iustin Pop

2726 255dcebd Iustin Pop
          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
2727 255dcebd Iustin Pop
                                     (/srv/..., False, "invalid api", [])],
2728 255dcebd Iustin Pop
                           "node2": [(/srv/..., True, "")]}
2729 e4376078 Iustin Pop
          }
2730 1f9430d6 Iustin Pop

2731 1f9430d6 Iustin Pop
    """
2732 1f9430d6 Iustin Pop
    all_os = {}
2733 a6ab004b Iustin Pop
    # we build here the list of nodes that didn't fail the RPC (at RPC
2734 a6ab004b Iustin Pop
    # level), so that nodes with a non-responding node daemon don't
2735 a6ab004b Iustin Pop
    # make all OSes invalid
2736 a6ab004b Iustin Pop
    good_nodes = [node_name for node_name in rlist
2737 4c4e4e1e Iustin Pop
                  if not rlist[node_name].fail_msg]
2738 83d92ad8 Iustin Pop
    for node_name, nr in rlist.items():
2739 4c4e4e1e Iustin Pop
      if nr.fail_msg or not nr.payload:
2740 1f9430d6 Iustin Pop
        continue
2741 ba00557a Guido Trotter
      for name, path, status, diagnose, variants in nr.payload:
2742 255dcebd Iustin Pop
        if name not in all_os:
2743 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
2744 1f9430d6 Iustin Pop
          # for each node in node_list
2745 255dcebd Iustin Pop
          all_os[name] = {}
2746 a6ab004b Iustin Pop
          for nname in good_nodes:
2747 255dcebd Iustin Pop
            all_os[name][nname] = []
2748 ba00557a Guido Trotter
        all_os[name][node_name].append((path, status, diagnose, variants))
2749 1f9430d6 Iustin Pop
    return all_os
2750 a8083063 Iustin Pop
2751 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2752 a8083063 Iustin Pop
    """Compute the list of OSes.
2753 a8083063 Iustin Pop

2754 a8083063 Iustin Pop
    """
2755 a6ab004b Iustin Pop
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
2756 94a02bb5 Iustin Pop
    node_data = self.rpc.call_os_diagnose(valid_nodes)
2757 857121ad Iustin Pop
    pol = self._DiagnoseByOS(node_data)
2758 1f9430d6 Iustin Pop
    output = []
2759 1e288a26 Guido Trotter
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
2760 1e288a26 Guido Trotter
    calc_variants = "variants" in self.op.output_fields
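    # "valid" ends up as the logical AND of the per-node status flags, while
    # "variants" is narrowed to the intersection of the variant lists
    # reported by each node, e.g. (hypothetical) ["2.6", "3.0"] and ["3.0"]
    # intersect to ["3.0"]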
2761 1e288a26 Guido Trotter
2762 83d92ad8 Iustin Pop
    for os_name, os_data in pol.items():
2763 1f9430d6 Iustin Pop
      row = []
2764 1e288a26 Guido Trotter
      if calc_valid:
2765 1e288a26 Guido Trotter
        valid = True
2766 1e288a26 Guido Trotter
        variants = None
2767 1e288a26 Guido Trotter
        for osl in os_data.values():
2768 1e288a26 Guido Trotter
          valid = valid and osl and osl[0][1]
2769 1e288a26 Guido Trotter
          if not valid:
2770 1e288a26 Guido Trotter
            variants = None
2771 1e288a26 Guido Trotter
            break
2772 1e288a26 Guido Trotter
          if calc_variants:
2773 1e288a26 Guido Trotter
            node_variants = osl[0][3]
2774 1e288a26 Guido Trotter
            if variants is None:
2775 1e288a26 Guido Trotter
              variants = node_variants
2776 1e288a26 Guido Trotter
            else:
2777 1e288a26 Guido Trotter
              variants = [v for v in variants if v in node_variants]
2778 1e288a26 Guido Trotter
2779 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
2780 1f9430d6 Iustin Pop
        if field == "name":
2781 1f9430d6 Iustin Pop
          val = os_name
2782 1f9430d6 Iustin Pop
        elif field == "valid":
2783 1e288a26 Guido Trotter
          val = valid
2784 1f9430d6 Iustin Pop
        elif field == "node_status":
2785 255dcebd Iustin Pop
          # this is just a copy of the dict
2786 1f9430d6 Iustin Pop
          val = {}
2787 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
2788 255dcebd Iustin Pop
            val[node_name] = nos_list
2789 1e288a26 Guido Trotter
        elif field == "variants":
2790 1e288a26 Guido Trotter
          val = variants
2791 1f9430d6 Iustin Pop
        else:
2792 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
2793 1f9430d6 Iustin Pop
        row.append(val)
2794 1f9430d6 Iustin Pop
      output.append(row)
2795 1f9430d6 Iustin Pop
2796 1f9430d6 Iustin Pop
    return output
2797 a8083063 Iustin Pop
2798 a8083063 Iustin Pop
2799 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
2800 a8083063 Iustin Pop
  """Logical unit for removing a node.
2801 a8083063 Iustin Pop

2802 a8083063 Iustin Pop
  """
2803 a8083063 Iustin Pop
  HPATH = "node-remove"
2804 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2805 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2806 a8083063 Iustin Pop
2807 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2808 a8083063 Iustin Pop
    """Build hooks env.
2809 a8083063 Iustin Pop

2810 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
2811 d08869ee Guido Trotter
    node would then be impossible to remove.
2812 a8083063 Iustin Pop

2813 a8083063 Iustin Pop
    """
2814 396e1b78 Michael Hanselmann
    env = {
2815 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2816 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
2817 396e1b78 Michael Hanselmann
      }
2818 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
2819 9bb31ea8 Iustin Pop
    try:
2820 cd46f3b4 Luca Bigliardi
      all_nodes.remove(self.op.node_name)
2821 9bb31ea8 Iustin Pop
    except ValueError:
2822 9bb31ea8 Iustin Pop
      logging.warning("Node %s which is about to be removed not found"
2823 9bb31ea8 Iustin Pop
                      " in the all nodes list", self.op.node_name)
2824 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
2825 a8083063 Iustin Pop
2826 a8083063 Iustin Pop
  def CheckPrereq(self):
2827 a8083063 Iustin Pop
    """Check prerequisites.
2828 a8083063 Iustin Pop

2829 a8083063 Iustin Pop
    This checks:
2830 a8083063 Iustin Pop
     - the node exists in the configuration
2831 a8083063 Iustin Pop
     - it does not have primary or secondary instances
2832 a8083063 Iustin Pop
     - it's not the master
2833 a8083063 Iustin Pop

2834 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2835 a8083063 Iustin Pop

2836 a8083063 Iustin Pop
    """
2837 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
2838 cf26a87a Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.node_name)
2839 cf26a87a Iustin Pop
    assert node is not None
2840 a8083063 Iustin Pop
2841 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2842 a8083063 Iustin Pop
2843 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
2844 a8083063 Iustin Pop
    if node.name == masternode:
2845 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
2846 5c983ee5 Iustin Pop
                                 " you need to failover first.",
2847 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
2848 a8083063 Iustin Pop
2849 a8083063 Iustin Pop
    for instance_name in instance_list:
2850 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
2851 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
2852 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
2853 5c983ee5 Iustin Pop
                                   " please remove first." % instance_name,
2854 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
2855 a8083063 Iustin Pop
    self.op.node_name = node.name
2856 a8083063 Iustin Pop
    self.node = node
2857 a8083063 Iustin Pop
2858 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2859 a8083063 Iustin Pop
    """Removes the node from the cluster.
2860 a8083063 Iustin Pop

2861 a8083063 Iustin Pop
    """
2862 a8083063 Iustin Pop
    node = self.node
2863 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
2864 9a4f63d1 Iustin Pop
                 node.name)
2865 a8083063 Iustin Pop
2866 b989b9d9 Ken Wehr
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
2867 b989b9d9 Ken Wehr
2868 44485f49 Guido Trotter
    # Promote nodes to master candidate as needed
2869 44485f49 Guido Trotter
    _AdjustCandidatePool(self, exceptions=[node.name])
2870 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
2871 a8083063 Iustin Pop
2872 cd46f3b4 Luca Bigliardi
    # Run post hooks on the node before it's removed
2873 cd46f3b4 Luca Bigliardi
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
2874 cd46f3b4 Luca Bigliardi
    try:
2875 1122eb25 Iustin Pop
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
2876 3cb5c1e3 Luca Bigliardi
    except:
2877 7260cfbe Iustin Pop
      # pylint: disable-msg=W0702
2878 3cb5c1e3 Luca Bigliardi
      self.LogWarning("Errors occurred running hooks on %s" % node.name)
2879 cd46f3b4 Luca Bigliardi
2880 b989b9d9 Ken Wehr
    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
2881 4c4e4e1e Iustin Pop
    msg = result.fail_msg
2882 0623d351 Iustin Pop
    if msg:
2883 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
2884 0623d351 Iustin Pop
                      " the cluster: %s", msg)
2885 c8a0948f Michael Hanselmann
2886 7672a621 Iustin Pop
    # Remove node from our /etc/hosts
2887 7672a621 Iustin Pop
    if self.cfg.GetClusterInfo().modify_etc_hosts:
2888 7672a621 Iustin Pop
      # FIXME: this should be done via an rpc call to node daemon
2889 7672a621 Iustin Pop
      utils.RemoveHostFromEtcHosts(node.name)
2890 7672a621 Iustin Pop
      _RedistributeAncillaryFiles(self)
2891 7672a621 Iustin Pop
2892 a8083063 Iustin Pop
2893 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
2894 a8083063 Iustin Pop
  """Logical unit for querying nodes.
2895 a8083063 Iustin Pop

2896 a8083063 Iustin Pop
  """
2897 7260cfbe Iustin Pop
  # pylint: disable-msg=W0142
2898 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
2899 35705d8f Guido Trotter
  REQ_BGL = False
2900 19bed813 Iustin Pop
2901 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
2902 19bed813 Iustin Pop
                    "master_candidate", "offline", "drained"]
2903 19bed813 Iustin Pop
2904 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
2905 31bf511f Iustin Pop
    "dtotal", "dfree",
2906 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
2907 31bf511f Iustin Pop
    "bootid",
2908 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
2909 31bf511f Iustin Pop
    )
2910 31bf511f Iustin Pop
2911 19bed813 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*[
2912 19bed813 Iustin Pop
    "pinst_cnt", "sinst_cnt",
2913 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
2914 31bf511f Iustin Pop
    "pip", "sip", "tags",
2915 0e67cdbe Iustin Pop
    "master",
2916 19bed813 Iustin Pop
    "role"] + _SIMPLE_FIELDS
2917 31bf511f Iustin Pop
    )
2918 a8083063 Iustin Pop
2919 35705d8f Guido Trotter
  def ExpandNames(self):
2920 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2921 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2922 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2923 a8083063 Iustin Pop
2924 35705d8f Guido Trotter
    self.needed_locks = {}
2925 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2926 c8d8b4c8 Iustin Pop
2927 c8d8b4c8 Iustin Pop
    if self.op.names:
2928 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
2929 35705d8f Guido Trotter
    else:
2930 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
2931 c8d8b4c8 Iustin Pop
2932 bc8e4a1a Iustin Pop
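    # Live (dynamic) fields require an RPC to the nodes; node locks are only
    # taken when such fields were requested and the caller asked for locking.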
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2933 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
2934 c8d8b4c8 Iustin Pop
    if self.do_locking:
2935 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
2936 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
2937 c8d8b4c8 Iustin Pop
2938 35705d8f Guido Trotter
  def CheckPrereq(self):
2939 35705d8f Guido Trotter
    """Check prerequisites.
2940 35705d8f Guido Trotter

2941 35705d8f Guido Trotter
    """
2942 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in the _GetWantedNodes,
2943 c8d8b4c8 Iustin Pop
    # if non empty, and if empty, there's no validation to do
2944 c8d8b4c8 Iustin Pop
    pass
2945 a8083063 Iustin Pop
2946 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2947 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2948 a8083063 Iustin Pop

2949 a8083063 Iustin Pop
    """
2950 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
2951 c8d8b4c8 Iustin Pop
    if self.do_locking:
2952 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2953 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2954 3fa93523 Guido Trotter
      nodenames = self.wanted
2955 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
2956 3fa93523 Guido Trotter
      if missing:
2957 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2958 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
2959 c8d8b4c8 Iustin Pop
    else:
2960 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
2961 c1f1cbb2 Iustin Pop
2962 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
2963 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
2964 a8083063 Iustin Pop
2965 a8083063 Iustin Pop
    # begin data gathering
2966 a8083063 Iustin Pop
2967 bc8e4a1a Iustin Pop
    if self.do_node_query:
2968 a8083063 Iustin Pop
      live_data = {}
2969 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2970 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2971 a8083063 Iustin Pop
      for name in nodenames:
2972 781de953 Iustin Pop
        nodeinfo = node_data[name]
2973 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2974 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2975 d599d686 Iustin Pop
          fn = utils.TryConvert
2976 a8083063 Iustin Pop
          live_data[name] = {
2977 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2978 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2979 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2980 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2981 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2982 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2983 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2984 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2985 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2986 a8083063 Iustin Pop
            }
2987 a8083063 Iustin Pop
        else:
2988 a8083063 Iustin Pop
          live_data[name] = {}
2989 a8083063 Iustin Pop
    else:
2990 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
2991 a8083063 Iustin Pop
2992 ec223efb Iustin Pop
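    # Reverse maps from node name to the sets of primary/secondary instances;
    # they are only filled in when one of the pinst_*/sinst_* fields was
    # actually requested.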
    node_to_primary = dict([(name, set()) for name in nodenames])
2993 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2994 a8083063 Iustin Pop
2995 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2996 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2997 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
2998 4dfd6266 Iustin Pop
      inst_data = self.cfg.GetAllInstancesInfo()
2999 a8083063 Iustin Pop
3000 1122eb25 Iustin Pop
      for inst in inst_data.values():
3001 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
3002 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
3003 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
3004 ec223efb Iustin Pop
          if secnode in node_to_secondary:
3005 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
3006 a8083063 Iustin Pop
3007 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
3008 0e67cdbe Iustin Pop
3009 a8083063 Iustin Pop
    # end data gathering
3010 a8083063 Iustin Pop
3011 a8083063 Iustin Pop
    output = []
3012 a8083063 Iustin Pop
    for node in nodelist:
3013 a8083063 Iustin Pop
      node_output = []
3014 a8083063 Iustin Pop
      for field in self.op.output_fields:
3015 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
3016 19bed813 Iustin Pop
          val = getattr(node, field)
3017 ec223efb Iustin Pop
        elif field == "pinst_list":
3018 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
3019 ec223efb Iustin Pop
        elif field == "sinst_list":
3020 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
3021 ec223efb Iustin Pop
        elif field == "pinst_cnt":
3022 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
3023 ec223efb Iustin Pop
        elif field == "sinst_cnt":
3024 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
3025 a8083063 Iustin Pop
        elif field == "pip":
3026 a8083063 Iustin Pop
          val = node.primary_ip
3027 a8083063 Iustin Pop
        elif field == "sip":
3028 a8083063 Iustin Pop
          val = node.secondary_ip
3029 130a6a6f Iustin Pop
        elif field == "tags":
3030 130a6a6f Iustin Pop
          val = list(node.GetTags())
3031 0e67cdbe Iustin Pop
        elif field == "master":
3032 0e67cdbe Iustin Pop
          val = node.name == master_node
3033 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
3034 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
3035 c120ff34 Iustin Pop
        elif field == "role":
3036 c120ff34 Iustin Pop
          if node.name == master_node:
3037 c120ff34 Iustin Pop
            val = "M"
3038 c120ff34 Iustin Pop
          elif node.master_candidate:
3039 c120ff34 Iustin Pop
            val = "C"
3040 c120ff34 Iustin Pop
          elif node.drained:
3041 c120ff34 Iustin Pop
            val = "D"
3042 c120ff34 Iustin Pop
          elif node.offline:
3043 c120ff34 Iustin Pop
            val = "O"
3044 c120ff34 Iustin Pop
          else:
3045 c120ff34 Iustin Pop
            val = "R"
3046 a8083063 Iustin Pop
        else:
3047 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
3048 a8083063 Iustin Pop
        node_output.append(val)
3049 a8083063 Iustin Pop
      output.append(node_output)
3050 a8083063 Iustin Pop
3051 a8083063 Iustin Pop
    return output
3052 a8083063 Iustin Pop
3053 a8083063 Iustin Pop
3054 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
3055 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
3056 dcb93971 Michael Hanselmann

3057 dcb93971 Michael Hanselmann
  """
3058 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
3059 21a15682 Guido Trotter
  REQ_BGL = False
3060 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
3061 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
3062 21a15682 Guido Trotter
3063 21a15682 Guido Trotter
  def ExpandNames(self):
3064 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3065 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3066 21a15682 Guido Trotter
                       selected=self.op.output_fields)
3067 21a15682 Guido Trotter
3068 21a15682 Guido Trotter
    self.needed_locks = {}
3069 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
3070 21a15682 Guido Trotter
    if not self.op.nodes:
3071 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3072 21a15682 Guido Trotter
    else:
3073 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
3074 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
3075 dcb93971 Michael Hanselmann
3076 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
3077 dcb93971 Michael Hanselmann
    """Check prerequisites.
3078 dcb93971 Michael Hanselmann

3079 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
3080 dcb93971 Michael Hanselmann

3081 dcb93971 Michael Hanselmann
    """
3082 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3083 dcb93971 Michael Hanselmann
3084 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
3085 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
3086 dcb93971 Michael Hanselmann

3087 dcb93971 Michael Hanselmann
    """
3088 a7ba5e53 Iustin Pop
    nodenames = self.nodes
3089 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
3090 dcb93971 Michael Hanselmann
3091 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
3092 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
3093 dcb93971 Michael Hanselmann
3094 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
3095 dcb93971 Michael Hanselmann
3096 dcb93971 Michael Hanselmann
    output = []
3097 dcb93971 Michael Hanselmann
    for node in nodenames:
3098 10bfe6cb Iustin Pop
      nresult = volumes[node]
3099 10bfe6cb Iustin Pop
      if nresult.offline:
3100 10bfe6cb Iustin Pop
        continue
3101 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
3102 10bfe6cb Iustin Pop
      if msg:
3103 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
3104 37d19eb2 Michael Hanselmann
        continue
3105 37d19eb2 Michael Hanselmann
3106 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
3107 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
3108 dcb93971 Michael Hanselmann
3109 dcb93971 Michael Hanselmann
      for vol in node_vols:
3110 dcb93971 Michael Hanselmann
        node_output = []
3111 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
3112 dcb93971 Michael Hanselmann
          if field == "node":
3113 dcb93971 Michael Hanselmann
            val = node
3114 dcb93971 Michael Hanselmann
          elif field == "phys":
3115 dcb93971 Michael Hanselmann
            val = vol['dev']
3116 dcb93971 Michael Hanselmann
          elif field == "vg":
3117 dcb93971 Michael Hanselmann
            val = vol['vg']
3118 dcb93971 Michael Hanselmann
          elif field == "name":
3119 dcb93971 Michael Hanselmann
            val = vol['name']
3120 dcb93971 Michael Hanselmann
          elif field == "size":
3121 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
3122 dcb93971 Michael Hanselmann
          elif field == "instance":
3123 dcb93971 Michael Hanselmann
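            # Figure out which instance owns this LV on the node; the
            # for/else falls through to "-" when no instance maps it.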
            for inst in ilist:
3124 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
3125 dcb93971 Michael Hanselmann
                continue
3126 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
3127 dcb93971 Michael Hanselmann
                val = inst.name
3128 dcb93971 Michael Hanselmann
                break
3129 dcb93971 Michael Hanselmann
            else:
3130 dcb93971 Michael Hanselmann
              val = '-'
3131 dcb93971 Michael Hanselmann
          else:
3132 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
3133 dcb93971 Michael Hanselmann
          node_output.append(str(val))
3134 dcb93971 Michael Hanselmann
3135 dcb93971 Michael Hanselmann
        output.append(node_output)
3136 dcb93971 Michael Hanselmann
3137 dcb93971 Michael Hanselmann
    return output
3138 dcb93971 Michael Hanselmann
3139 dcb93971 Michael Hanselmann
3140 9e5442ce Michael Hanselmann
class LUQueryNodeStorage(NoHooksLU):
3141 9e5442ce Michael Hanselmann
  """Logical unit for getting information on storage units on node(s).
3142 9e5442ce Michael Hanselmann

3143 9e5442ce Michael Hanselmann
  """
3144 9e5442ce Michael Hanselmann
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
3145 9e5442ce Michael Hanselmann
  REQ_BGL = False
3146 620a85fd Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
3147 9e5442ce Michael Hanselmann
3148 0e3baaf3 Iustin Pop
  def CheckArguments(self):
3149 0e3baaf3 Iustin Pop
    _CheckStorageType(self.op.storage_type)
3150 9e5442ce Michael Hanselmann
3151 9e5442ce Michael Hanselmann
    _CheckOutputFields(static=self._FIELDS_STATIC,
3152 620a85fd Iustin Pop
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
3153 9e5442ce Michael Hanselmann
                       selected=self.op.output_fields)
3154 9e5442ce Michael Hanselmann
3155 0e3baaf3 Iustin Pop
  def ExpandNames(self):
3156 9e5442ce Michael Hanselmann
    self.needed_locks = {}
3157 9e5442ce Michael Hanselmann
    self.share_locks[locking.LEVEL_NODE] = 1
3158 9e5442ce Michael Hanselmann
3159 9e5442ce Michael Hanselmann
    if self.op.nodes:
3160 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = \
3161 9e5442ce Michael Hanselmann
        _GetWantedNodes(self, self.op.nodes)
3162 9e5442ce Michael Hanselmann
    else:
3163 9e5442ce Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3164 9e5442ce Michael Hanselmann
3165 9e5442ce Michael Hanselmann
  def CheckPrereq(self):
3166 9e5442ce Michael Hanselmann
    """Check prerequisites.
3167 9e5442ce Michael Hanselmann

3168 9e5442ce Michael Hanselmann
    This checks that the fields required are valid output fields.
3169 9e5442ce Michael Hanselmann

3170 9e5442ce Michael Hanselmann
    """
3171 9e5442ce Michael Hanselmann
    self.op.name = getattr(self.op, "name", None)
3172 9e5442ce Michael Hanselmann
3173 9e5442ce Michael Hanselmann
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
3174 9e5442ce Michael Hanselmann
3175 9e5442ce Michael Hanselmann
  def Exec(self, feedback_fn):
3176 9e5442ce Michael Hanselmann
    """Computes the list of nodes and their attributes.
3177 9e5442ce Michael Hanselmann

3178 9e5442ce Michael Hanselmann
    """
3179 9e5442ce Michael Hanselmann
    # Always get name to sort by
3180 9e5442ce Michael Hanselmann
    if constants.SF_NAME in self.op.output_fields:
3181 9e5442ce Michael Hanselmann
      fields = self.op.output_fields[:]
3182 9e5442ce Michael Hanselmann
    else:
3183 9e5442ce Michael Hanselmann
      fields = [constants.SF_NAME] + self.op.output_fields
3184 9e5442ce Michael Hanselmann
3185 620a85fd Iustin Pop
    # Never ask for node or type as it's only known to the LU
3186 620a85fd Iustin Pop
    for extra in [constants.SF_NODE, constants.SF_TYPE]:
3187 620a85fd Iustin Pop
      while extra in fields:
3188 620a85fd Iustin Pop
        fields.remove(extra)
3189 9e5442ce Michael Hanselmann
3190 9e5442ce Michael Hanselmann
    field_idx = dict([(name, idx) for (idx, name) in enumerate(fields)])
3191 9e5442ce Michael Hanselmann
    name_idx = field_idx[constants.SF_NAME]
3192 9e5442ce Michael Hanselmann
3193 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3194 9e5442ce Michael Hanselmann
    data = self.rpc.call_storage_list(self.nodes,
3195 9e5442ce Michael Hanselmann
                                      self.op.storage_type, st_args,
3196 9e5442ce Michael Hanselmann
                                      self.op.name, fields)
3197 9e5442ce Michael Hanselmann
3198 9e5442ce Michael Hanselmann
    result = []
3199 9e5442ce Michael Hanselmann
3200 9e5442ce Michael Hanselmann
    for node in utils.NiceSort(self.nodes):
3201 9e5442ce Michael Hanselmann
      nresult = data[node]
3202 9e5442ce Michael Hanselmann
      if nresult.offline:
3203 9e5442ce Michael Hanselmann
        continue
3204 9e5442ce Michael Hanselmann
3205 9e5442ce Michael Hanselmann
      msg = nresult.fail_msg
3206 9e5442ce Michael Hanselmann
      if msg:
3207 9e5442ce Michael Hanselmann
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
3208 9e5442ce Michael Hanselmann
        continue
3209 9e5442ce Michael Hanselmann
3210 9e5442ce Michael Hanselmann
      rows = dict([(row[name_idx], row) for row in nresult.payload])
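      # Index the rows by storage unit name so they can be returned in a
      # stable, nicely sorted order below.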
3211 9e5442ce Michael Hanselmann
3212 9e5442ce Michael Hanselmann
      for name in utils.NiceSort(rows.keys()):
3213 9e5442ce Michael Hanselmann
        row = rows[name]
3214 9e5442ce Michael Hanselmann
3215 9e5442ce Michael Hanselmann
        out = []
3216 9e5442ce Michael Hanselmann
3217 9e5442ce Michael Hanselmann
        for field in self.op.output_fields:
3218 620a85fd Iustin Pop
          if field == constants.SF_NODE:
3219 9e5442ce Michael Hanselmann
            val = node
3220 620a85fd Iustin Pop
          elif field == constants.SF_TYPE:
3221 620a85fd Iustin Pop
            val = self.op.storage_type
3222 9e5442ce Michael Hanselmann
          elif field in field_idx:
3223 9e5442ce Michael Hanselmann
            val = row[field_idx[field]]
3224 9e5442ce Michael Hanselmann
          else:
3225 9e5442ce Michael Hanselmann
            raise errors.ParameterError(field)
3226 9e5442ce Michael Hanselmann
3227 9e5442ce Michael Hanselmann
          out.append(val)
3228 9e5442ce Michael Hanselmann
3229 9e5442ce Michael Hanselmann
        result.append(out)
3230 9e5442ce Michael Hanselmann
3231 9e5442ce Michael Hanselmann
    return result
3232 9e5442ce Michael Hanselmann
3233 9e5442ce Michael Hanselmann
3234 efb8da02 Michael Hanselmann
class LUModifyNodeStorage(NoHooksLU):
3235 efb8da02 Michael Hanselmann
  """Logical unit for modifying a storage volume on a node.
3236 efb8da02 Michael Hanselmann

3237 efb8da02 Michael Hanselmann
  """
3238 efb8da02 Michael Hanselmann
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
3239 efb8da02 Michael Hanselmann
  REQ_BGL = False
3240 efb8da02 Michael Hanselmann
3241 efb8da02 Michael Hanselmann
  def CheckArguments(self):
3242 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3243 efb8da02 Michael Hanselmann
3244 0e3baaf3 Iustin Pop
    _CheckStorageType(self.op.storage_type)
3245 efb8da02 Michael Hanselmann
3246 efb8da02 Michael Hanselmann
  def ExpandNames(self):
3247 efb8da02 Michael Hanselmann
    self.needed_locks = {
3248 efb8da02 Michael Hanselmann
      locking.LEVEL_NODE: self.op.node_name,
3249 efb8da02 Michael Hanselmann
      }
3250 efb8da02 Michael Hanselmann
3251 efb8da02 Michael Hanselmann
  def CheckPrereq(self):
3252 efb8da02 Michael Hanselmann
    """Check prerequisites.
3253 efb8da02 Michael Hanselmann

3254 efb8da02 Michael Hanselmann
    """
3255 efb8da02 Michael Hanselmann
    storage_type = self.op.storage_type
3256 efb8da02 Michael Hanselmann
3257 efb8da02 Michael Hanselmann
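    # Each storage type only allows a whitelisted set of fields to be
    # changed (e.g. for LVM physical volumes this is typically just the
    # allocatable flag).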
    try:
3258 efb8da02 Michael Hanselmann
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[storage_type]
3259 efb8da02 Michael Hanselmann
    except KeyError:
3260 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
3261 5c983ee5 Iustin Pop
                                 " modified" % storage_type,
3262 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3263 efb8da02 Michael Hanselmann
3264 efb8da02 Michael Hanselmann
    diff = set(self.op.changes.keys()) - modifiable
3265 efb8da02 Michael Hanselmann
    if diff:
3266 efb8da02 Michael Hanselmann
      raise errors.OpPrereqError("The following fields can not be modified for"
3267 efb8da02 Michael Hanselmann
                                 " storage units of type '%s': %r" %
3268 5c983ee5 Iustin Pop
                                 (storage_type, list(diff)),
3269 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3270 efb8da02 Michael Hanselmann
3271 efb8da02 Michael Hanselmann
  def Exec(self, feedback_fn):
3272 efb8da02 Michael Hanselmann
    """Computes the list of nodes and their attributes.
3273 efb8da02 Michael Hanselmann

3274 efb8da02 Michael Hanselmann
    """
3275 efb8da02 Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
3276 efb8da02 Michael Hanselmann
    result = self.rpc.call_storage_modify(self.op.node_name,
3277 efb8da02 Michael Hanselmann
                                          self.op.storage_type, st_args,
3278 efb8da02 Michael Hanselmann
                                          self.op.name, self.op.changes)
3279 efb8da02 Michael Hanselmann
    result.Raise("Failed to modify storage unit '%s' on %s" %
3280 efb8da02 Michael Hanselmann
                 (self.op.name, self.op.node_name))
3281 efb8da02 Michael Hanselmann
3282 efb8da02 Michael Hanselmann
3283 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
3284 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
3285 a8083063 Iustin Pop

3286 a8083063 Iustin Pop
  """
3287 a8083063 Iustin Pop
  HPATH = "node-add"
3288 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
3289 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
3290 a8083063 Iustin Pop
3291 44caf5a8 Iustin Pop
  def CheckArguments(self):
3292 44caf5a8 Iustin Pop
    # validate/normalize the node name
3293 44caf5a8 Iustin Pop
    self.op.node_name = utils.HostInfo.NormalizeName(self.op.node_name)
3294 44caf5a8 Iustin Pop
3295 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3296 a8083063 Iustin Pop
    """Build hooks env.
3297 a8083063 Iustin Pop

3298 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
3299 a8083063 Iustin Pop

3300 a8083063 Iustin Pop
    """
3301 a8083063 Iustin Pop
    env = {
3302 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
3303 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
3304 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
3305 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
3306 a8083063 Iustin Pop
      }
3307 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
3308 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
3309 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
3310 a8083063 Iustin Pop
3311 a8083063 Iustin Pop
  def CheckPrereq(self):
3312 a8083063 Iustin Pop
    """Check prerequisites.
3313 a8083063 Iustin Pop

3314 a8083063 Iustin Pop
    This checks:
3315 a8083063 Iustin Pop
     - the new node is not already in the config
3316 a8083063 Iustin Pop
     - it is resolvable
3317 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
3318 a8083063 Iustin Pop

3319 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
3320 a8083063 Iustin Pop

3321 a8083063 Iustin Pop
    """
3322 a8083063 Iustin Pop
    node_name = self.op.node_name
3323 a8083063 Iustin Pop
    cfg = self.cfg
3324 a8083063 Iustin Pop
3325 104f4ca1 Iustin Pop
    dns_data = utils.GetHostInfo(node_name)
3326 a8083063 Iustin Pop
3327 bcf043c9 Iustin Pop
    node = dns_data.name
3328 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
3329 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
3330 a8083063 Iustin Pop
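    # A node added without an explicit secondary IP is treated as
    # single-homed: the secondary address simply mirrors the primary one.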
    if secondary_ip is None:
3331 a8083063 Iustin Pop
      secondary_ip = primary_ip
3332 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
3333 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given",
3334 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3335 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
3336 e7c6e02b Michael Hanselmann
3337 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
3338 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
3339 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
3340 5c983ee5 Iustin Pop
                                 node, errors.ECODE_EXISTS)
3341 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
3342 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node %s is not in the configuration" % node,
3343 5c983ee5 Iustin Pop
                                 errors.ECODE_NOENT)
3344 a8083063 Iustin Pop
3345 1513e2dd Iustin Pop
    self.changed_primary_ip = False
3346 1513e2dd Iustin Pop
3347 a8083063 Iustin Pop
    for existing_node_name in node_list:
3348 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
3349 e7c6e02b Michael Hanselmann
3350 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
3351 1513e2dd Iustin Pop
        if existing_node.secondary_ip != secondary_ip:
3352 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
3353 5c983ee5 Iustin Pop
                                     " address configuration as before",
3354 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
3355 1513e2dd Iustin Pop
        if existing_node.primary_ip != primary_ip:
3356 1513e2dd Iustin Pop
          self.changed_primary_ip = True
3357 1513e2dd Iustin Pop
3358 e7c6e02b Michael Hanselmann
        continue
3359 e7c6e02b Michael Hanselmann
3360 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
3361 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
3362 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
3363 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
3364 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
3365 5c983ee5 Iustin Pop
                                   " existing node %s" % existing_node.name,
3366 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
3367 a8083063 Iustin Pop
3368 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
3369 a8083063 Iustin Pop
    # same as for the master
3370 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
3371 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
3372 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
3373 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
3374 a8083063 Iustin Pop
      if master_singlehomed:
3375 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
3376 5c983ee5 Iustin Pop
                                   " new node has one",
3377 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3378 a8083063 Iustin Pop
      else:
3379 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
3380 5c983ee5 Iustin Pop
                                   " new node doesn't have one",
3381 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3382 a8083063 Iustin Pop
3383 5bbd3f7f Michael Hanselmann
    # checks reachability
3384 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
3385 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping",
3386 5c983ee5 Iustin Pop
                                 errors.ECODE_ENVIRON)
3387 a8083063 Iustin Pop
3388 a8083063 Iustin Pop
    if not newbie_singlehomed:
3389 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
3390 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
3391 b15d625f Iustin Pop
                           source=myself.secondary_ip):
3392 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
3393 5c983ee5 Iustin Pop
                                   " based ping to noded port",
3394 5c983ee5 Iustin Pop
                                   errors.ECODE_ENVIRON)
3395 a8083063 Iustin Pop
3396 a8ae3eb5 Iustin Pop
    if self.op.readd:
3397 a8ae3eb5 Iustin Pop
      exceptions = [node]
3398 a8ae3eb5 Iustin Pop
    else:
3399 a8ae3eb5 Iustin Pop
      exceptions = []
3400 6d7e1f20 Guido Trotter
3401 6d7e1f20 Guido Trotter
    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)
3402 0fff97e9 Guido Trotter
3403 a8ae3eb5 Iustin Pop
    if self.op.readd:
3404 a8ae3eb5 Iustin Pop
      self.new_node = self.cfg.GetNodeInfo(node)
3405 a8ae3eb5 Iustin Pop
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
3406 a8ae3eb5 Iustin Pop
    else:
3407 a8ae3eb5 Iustin Pop
      self.new_node = objects.Node(name=node,
3408 a8ae3eb5 Iustin Pop
                                   primary_ip=primary_ip,
3409 a8ae3eb5 Iustin Pop
                                   secondary_ip=secondary_ip,
3410 a8ae3eb5 Iustin Pop
                                   master_candidate=self.master_candidate,
3411 a8ae3eb5 Iustin Pop
                                   offline=False, drained=False)
3412 a8083063 Iustin Pop
3413 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3414 a8083063 Iustin Pop
    """Adds the new node to the cluster.
3415 a8083063 Iustin Pop

3416 a8083063 Iustin Pop
    """
3417 a8083063 Iustin Pop
    new_node = self.new_node
3418 a8083063 Iustin Pop
    node = new_node.name
3419 a8083063 Iustin Pop
3420 a8ae3eb5 Iustin Pop
    # for re-adds, reset the offline/drained/master-candidate flags;
3421 a8ae3eb5 Iustin Pop
    # we need to reset here, otherwise offline would prevent RPC calls
3422 a8ae3eb5 Iustin Pop
    # later in the procedure; this also means that if the re-add
3423 a8ae3eb5 Iustin Pop
    # fails, we are left with a non-offlined, broken node
3424 a8ae3eb5 Iustin Pop
    if self.op.readd:
3425 7260cfbe Iustin Pop
      new_node.drained = new_node.offline = False # pylint: disable-msg=W0201
3426 a8ae3eb5 Iustin Pop
      self.LogInfo("Readding a node, the offline/drained flags were reset")
3427 a8ae3eb5 Iustin Pop
      # if we demote the node, we do cleanup later in the procedure
3428 a8ae3eb5 Iustin Pop
      new_node.master_candidate = self.master_candidate
3429 1513e2dd Iustin Pop
      if self.changed_primary_ip:
3430 1513e2dd Iustin Pop
        new_node.primary_ip = self.op.primary_ip
3431 a8ae3eb5 Iustin Pop
3432 a8ae3eb5 Iustin Pop
    # notify the user about any possible mc promotion
3433 a8ae3eb5 Iustin Pop
    if new_node.master_candidate:
3434 a8ae3eb5 Iustin Pop
      self.LogInfo("Node will be a master candidate")
3435 a8ae3eb5 Iustin Pop
3436 a8083063 Iustin Pop
    # check connectivity
3437 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
3438 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
3439 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
3440 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
3441 90b54c26 Iustin Pop
                   node, result.payload)
3442 a8083063 Iustin Pop
    else:
3443 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
3444 90b54c26 Iustin Pop
                               " node version %s" %
3445 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
3446 a8083063 Iustin Pop
3447 a8083063 Iustin Pop
    # setup ssh on node
3448 b989b9d9 Ken Wehr
    if self.cfg.GetClusterInfo().modify_ssh_setup:
3449 b989b9d9 Ken Wehr
      logging.info("Copy ssh key to node %s", node)
3450 b989b9d9 Ken Wehr
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
3451 b989b9d9 Ken Wehr
      keyarray = []
3452 b989b9d9 Ken Wehr
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
3453 b989b9d9 Ken Wehr
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
3454 b989b9d9 Ken Wehr
                  priv_key, pub_key]
3455 b989b9d9 Ken Wehr
3456 b989b9d9 Ken Wehr
      for i in keyfiles:
3457 b989b9d9 Ken Wehr
        keyarray.append(utils.ReadFile(i))
3458 b989b9d9 Ken Wehr
3459 b989b9d9 Ken Wehr
      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
3460 b989b9d9 Ken Wehr
                                      keyarray[2], keyarray[3], keyarray[4],
3461 b989b9d9 Ken Wehr
                                      keyarray[5])
3462 b989b9d9 Ken Wehr
      result.Raise("Cannot transfer ssh keys to the new node")
3463 a8083063 Iustin Pop
3464 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
3465 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
3466 7672a621 Iustin Pop
      # FIXME: this should be done via an rpc call to node daemon
3467 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
3468 c8a0948f Michael Hanselmann
3469 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
3470 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
3471 781de953 Iustin Pop
                                                 new_node.secondary_ip)
3472 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
3473 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_ENVIRON)
3474 c2fc8250 Iustin Pop
      if not result.payload:
3475 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
3476 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
3477 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
3478 a8083063 Iustin Pop
3479 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
3480 5c0527ed Guido Trotter
    node_verify_param = {
3481 f60759f7 Iustin Pop
      constants.NV_NODELIST: [node],
3482 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
3483 5c0527ed Guido Trotter
    }
3484 5c0527ed Guido Trotter
3485 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
3486 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
3487 5c0527ed Guido Trotter
    for verifier in node_verify_list:
3488 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
3489 f60759f7 Iustin Pop
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
3490 6f68a739 Iustin Pop
      if nl_payload:
3491 6f68a739 Iustin Pop
        for failed in nl_payload:
3492 31821208 Iustin Pop
          feedback_fn("ssh/hostname verification failed"
3493 31821208 Iustin Pop
                      " (checking from %s): %s" %
3494 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
3495 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
3496 ff98055b Iustin Pop
3497 d8470559 Michael Hanselmann
    if self.op.readd:
3498 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
3499 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
3500 a8ae3eb5 Iustin Pop
      # make sure we redistribute the config
3501 a4eae71f Michael Hanselmann
      self.cfg.Update(new_node, feedback_fn)
3502 a8ae3eb5 Iustin Pop
      # and make sure the new node will not have old files around
3503 a8ae3eb5 Iustin Pop
      if not new_node.master_candidate:
3504 a8ae3eb5 Iustin Pop
        result = self.rpc.call_node_demote_from_mc(new_node.name)
3505 3cebe102 Michael Hanselmann
        msg = result.fail_msg
3506 a8ae3eb5 Iustin Pop
        if msg:
3507 a8ae3eb5 Iustin Pop
          self.LogWarning("Node failed to demote itself from master"
3508 a8ae3eb5 Iustin Pop
                          " candidate status: %s" % msg)
3509 d8470559 Michael Hanselmann
    else:
3510 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
3511 0debfb35 Guido Trotter
      self.context.AddNode(new_node, self.proc.GetECId())
3512 a8083063 Iustin Pop
3513 a8083063 Iustin Pop
3514 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
3515 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
3516 b31c8676 Iustin Pop

3517 b31c8676 Iustin Pop
  """
3518 b31c8676 Iustin Pop
  HPATH = "node-modify"
3519 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
3520 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
3521 b31c8676 Iustin Pop
  REQ_BGL = False
3522 b31c8676 Iustin Pop
3523 b31c8676 Iustin Pop
  def CheckArguments(self):
3524 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3525 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
3526 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
3527 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
3528 601908d0 Iustin Pop
    _CheckBooleanOpField(self.op, 'auto_promote')
3529 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
3530 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
3531 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification",
3532 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3533 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
3534 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
3535 5c983ee5 Iustin Pop
                                 " state at the same time",
3536 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3537 b31c8676 Iustin Pop
3538 601908d0 Iustin Pop
    # Boolean value that tells us whether we're offlining or draining the node
3539 601908d0 Iustin Pop
    self.offline_or_drain = (self.op.offline == True or
3540 601908d0 Iustin Pop
                             self.op.drained == True)
3541 601908d0 Iustin Pop
    self.deoffline_or_drain = (self.op.offline == False or
3542 601908d0 Iustin Pop
                               self.op.drained == False)
3543 601908d0 Iustin Pop
    self.might_demote = (self.op.master_candidate == False or
3544 601908d0 Iustin Pop
                         self.offline_or_drain)
3545 601908d0 Iustin Pop
3546 601908d0 Iustin Pop
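    # Demoting this node might leave the cluster short of master candidates;
    # when auto_promote is set we therefore lock all nodes so that
    # _AdjustCandidatePool can promote a replacement in Exec.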
    self.lock_all = self.op.auto_promote and self.might_demote
3547 601908d0 Iustin Pop
3548 601908d0 Iustin Pop
3549 b31c8676 Iustin Pop
  def ExpandNames(self):
3550 601908d0 Iustin Pop
    if self.lock_all:
3551 601908d0 Iustin Pop
      self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
3552 601908d0 Iustin Pop
    else:
3553 601908d0 Iustin Pop
      self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
3554 b31c8676 Iustin Pop
3555 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
3556 b31c8676 Iustin Pop
    """Build hooks env.
3557 b31c8676 Iustin Pop

3558 b31c8676 Iustin Pop
    This runs on the master node.
3559 b31c8676 Iustin Pop

3560 b31c8676 Iustin Pop
    """
3561 b31c8676 Iustin Pop
    env = {
3562 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
3563 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
3564 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
3565 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
3566 b31c8676 Iustin Pop
      }
3567 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
3568 b31c8676 Iustin Pop
          self.op.node_name]
3569 b31c8676 Iustin Pop
    return env, nl, nl
3570 b31c8676 Iustin Pop
3571 b31c8676 Iustin Pop
  def CheckPrereq(self):
3572 b31c8676 Iustin Pop
    """Check prerequisites.
3573 b31c8676 Iustin Pop

3574 b31c8676 Iustin Pop
    This only checks the instance list against the existing names.
3575 b31c8676 Iustin Pop

3576 b31c8676 Iustin Pop
    """
3577 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
3578 b31c8676 Iustin Pop
3579 97c61d46 Iustin Pop
    if (self.op.master_candidate is not None or
3580 97c61d46 Iustin Pop
        self.op.drained is not None or
3581 97c61d46 Iustin Pop
        self.op.offline is not None):
3582 97c61d46 Iustin Pop
      # we can't change the master's node flags
3583 97c61d46 Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
3584 97c61d46 Iustin Pop
        raise errors.OpPrereqError("The master role can be changed"
3585 5c983ee5 Iustin Pop
                                   " only via masterfailover",
3586 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
3587 97c61d46 Iustin Pop
3588 601908d0 Iustin Pop
3589 601908d0 Iustin Pop
    if node.master_candidate and self.might_demote and not self.lock_all:
3590 601908d0 Iustin Pop
      assert not self.op.auto_promote, "auto-promote set but lock_all not"
3591 601908d0 Iustin Pop
      # check if after removing the current node, we're missing master
3592 601908d0 Iustin Pop
      # candidates
3593 601908d0 Iustin Pop
      (mc_remaining, mc_should, _) = \
3594 601908d0 Iustin Pop
          self.cfg.GetMasterCandidateStats(exceptions=[node.name])
3595 8fe9239e Iustin Pop
      if mc_remaining < mc_should:
3596 601908d0 Iustin Pop
        raise errors.OpPrereqError("Not enough master candidates, please"
3597 601908d0 Iustin Pop
                                   " pass auto_promote to allow promotion",
3598 601908d0 Iustin Pop
                                   errors.ECODE_INVAL)
3599 3e83dd48 Iustin Pop
3600 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
3601 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
3602 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
3603 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
3604 5c983ee5 Iustin Pop
                                 " to master_candidate" % node.name,
3605 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3606 3a5ba66a Iustin Pop
3607 3d9eb52b Guido Trotter
    # If we're being deofflined/drained, we'll MC ourself if needed
3608 601908d0 Iustin Pop
    if (self.deoffline_or_drain and not self.offline_or_drain and not
3609 cea0534a Guido Trotter
        self.op.master_candidate == True and not node.master_candidate):
3610 3d9eb52b Guido Trotter
      self.op.master_candidate = _DecideSelfPromotion(self)
3611 3d9eb52b Guido Trotter
      if self.op.master_candidate:
3612 3d9eb52b Guido Trotter
        self.LogInfo("Autopromoting node to master candidate")
3613 3d9eb52b Guido Trotter
3614 b31c8676 Iustin Pop
    return
3615 b31c8676 Iustin Pop
3616 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
3617 b31c8676 Iustin Pop
    """Modifies a node.
3618 b31c8676 Iustin Pop

3619 b31c8676 Iustin Pop
    """
3620 3a5ba66a Iustin Pop
    node = self.node
3621 b31c8676 Iustin Pop
3622 b31c8676 Iustin Pop
    result = []
3623 c9d443ea Iustin Pop
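    # changed_mc tracks whether the master candidate status was touched;
    # if so, the node is re-added to the job queue context at the end.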
    changed_mc = False
3624 b31c8676 Iustin Pop
3625 3a5ba66a Iustin Pop
    if self.op.offline is not None:
3626 3a5ba66a Iustin Pop
      node.offline = self.op.offline
3627 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
3628 c9d443ea Iustin Pop
      if self.op.offline == True:
3629 c9d443ea Iustin Pop
        if node.master_candidate:
3630 c9d443ea Iustin Pop
          node.master_candidate = False
3631 c9d443ea Iustin Pop
          changed_mc = True
3632 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
3633 c9d443ea Iustin Pop
        if node.drained:
3634 c9d443ea Iustin Pop
          node.drained = False
3635 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
3636 3a5ba66a Iustin Pop
3637 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
3638 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
3639 c9d443ea Iustin Pop
      changed_mc = True
3640 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
3641 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
3642 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
3643 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
3644 0959c824 Iustin Pop
        if msg:
3645 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
3646 b31c8676 Iustin Pop
3647 c9d443ea Iustin Pop
    if self.op.drained is not None:
3648 c9d443ea Iustin Pop
      node.drained = self.op.drained
3649 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
3650 c9d443ea Iustin Pop
      if self.op.drained == True:
3651 c9d443ea Iustin Pop
        if node.master_candidate:
3652 c9d443ea Iustin Pop
          node.master_candidate = False
3653 c9d443ea Iustin Pop
          changed_mc = True
3654 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
3655 dec0d9da Iustin Pop
          rrc = self.rpc.call_node_demote_from_mc(node.name)
3656 3cebe102 Michael Hanselmann
          msg = rrc.fail_msg
3657 dec0d9da Iustin Pop
          if msg:
3658 dec0d9da Iustin Pop
            self.LogWarning("Node failed to demote itself: %s" % msg)
3659 c9d443ea Iustin Pop
        if node.offline:
3660 c9d443ea Iustin Pop
          node.offline = False
3661 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
3662 c9d443ea Iustin Pop
3663 601908d0 Iustin Pop
    # we locked all nodes, we adjust the CP before updating this node
3664 601908d0 Iustin Pop
    if self.lock_all:
3665 601908d0 Iustin Pop
      _AdjustCandidatePool(self, [node.name])
3666 601908d0 Iustin Pop
3667 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
3668 a4eae71f Michael Hanselmann
    self.cfg.Update(node, feedback_fn)
3669 601908d0 Iustin Pop
3670 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
3671 c9d443ea Iustin Pop
    if changed_mc:
3672 3a26773f Iustin Pop
      self.context.ReaddNode(node)
3673 b31c8676 Iustin Pop
3674 b31c8676 Iustin Pop
    return result
3675 b31c8676 Iustin Pop
3676 b31c8676 Iustin Pop
3677 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
3678 f5118ade Iustin Pop
  """Powercycles a node.
3679 f5118ade Iustin Pop

3680 f5118ade Iustin Pop
  """
3681 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
3682 f5118ade Iustin Pop
  REQ_BGL = False
3683 f5118ade Iustin Pop
3684 f5118ade Iustin Pop
  def CheckArguments(self):
3685 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
3686 cf26a87a Iustin Pop
    if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
3687 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
3688 5c983ee5 Iustin Pop
                                 " parameter was not set",
3689 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
3690 f5118ade Iustin Pop
3691 f5118ade Iustin Pop
  def ExpandNames(self):
3692 f5118ade Iustin Pop
    """Locking for PowercycleNode.
3693 f5118ade Iustin Pop

3694 efb8da02 Michael Hanselmann
    This is a last-resort option and shouldn't block on other
3695 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
3696 f5118ade Iustin Pop

3697 f5118ade Iustin Pop
    """
3698 f5118ade Iustin Pop
    self.needed_locks = {}
3699 f5118ade Iustin Pop
3700 f5118ade Iustin Pop
  def CheckPrereq(self):
3701 f5118ade Iustin Pop
    """Check prerequisites.
3702 f5118ade Iustin Pop

3703 f5118ade Iustin Pop
    This LU has no prereqs.
3704 f5118ade Iustin Pop

3705 f5118ade Iustin Pop
    """
3706 f5118ade Iustin Pop
    pass
3707 f5118ade Iustin Pop
3708 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
3709 f5118ade Iustin Pop
    """Reboots a node.
3710 f5118ade Iustin Pop

3711 f5118ade Iustin Pop
    """
3712 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
3713 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
3714 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
3715 f5118ade Iustin Pop
    return result.payload
3716 f5118ade Iustin Pop
3717 f5118ade Iustin Pop
3718 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
3719 a8083063 Iustin Pop
  """Query cluster configuration.
3720 a8083063 Iustin Pop

3721 a8083063 Iustin Pop
  """
3722 a8083063 Iustin Pop
  _OP_REQP = []
3723 642339cf Guido Trotter
  REQ_BGL = False
3724 642339cf Guido Trotter
3725 642339cf Guido Trotter
  def ExpandNames(self):
3726 642339cf Guido Trotter
    self.needed_locks = {}
3727 a8083063 Iustin Pop
3728 a8083063 Iustin Pop
  def CheckPrereq(self):
3729 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
3730 a8083063 Iustin Pop

3731 a8083063 Iustin Pop
    """
3732 a8083063 Iustin Pop
    pass
3733 a8083063 Iustin Pop
3734 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3735 a8083063 Iustin Pop
    """Return cluster config.
3736 a8083063 Iustin Pop

3737 a8083063 Iustin Pop
    """
3738 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
3739 17463d22 René Nussbaumer
    os_hvp = {}
3740 17463d22 René Nussbaumer
3741 17463d22 Renรฉ Nussbaumer
    # Filter just for enabled hypervisors
3742 17463d22 Renรฉ Nussbaumer
    for os_name, hv_dict in cluster.os_hvp.items():
3743 17463d22 Renรฉ Nussbaumer
      os_hvp[os_name] = {}
3744 17463d22 Renรฉ Nussbaumer
      for hv_name, hv_params in hv_dict.items():
3745 17463d22 Renรฉ Nussbaumer
        if hv_name in cluster.enabled_hypervisors:
3746 17463d22 Renรฉ Nussbaumer
          os_hvp[os_name][hv_name] = hv_params
3747 17463d22 Renรฉ Nussbaumer
3748 a8083063 Iustin Pop
    result = {
3749 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
3750 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
3751 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
3752 d1a7d66f Guido Trotter
      "os_api_version": max(constants.OS_API_VERSIONS),
3753 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
3754 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
3755 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
3756 469f88e1 Iustin Pop
      "master": cluster.master_node,
3757 066f465d Guido Trotter
      "default_hypervisor": cluster.enabled_hypervisors[0],
3758 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
3759 b8810fec Michael Hanselmann
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
3760 7c4d6c7b Michael Hanselmann
                        for hypervisor_name in cluster.enabled_hypervisors]),
3761 17463d22 René Nussbaumer
      "os_hvp": os_hvp,
3762 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
3763 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
3764 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
3765 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
3766 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
3767 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
3768 3953242f Iustin Pop
      "maintain_node_health": cluster.maintain_node_health,
3769 90f72445 Iustin Pop
      "ctime": cluster.ctime,
3770 90f72445 Iustin Pop
      "mtime": cluster.mtime,
3771 259578eb Iustin Pop
      "uuid": cluster.uuid,
3772 c118d1f4 Michael Hanselmann
      "tags": list(cluster.GetTags()),
3773 1338f2b4 Balazs Lecz
      "uid_pool": cluster.uid_pool,
3774 a8083063 Iustin Pop
      }
3775 a8083063 Iustin Pop
3776 a8083063 Iustin Pop
    return result
3777 a8083063 Iustin Pop
3778 a8083063 Iustin Pop
3779 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
3780 ae5849b5 Michael Hanselmann
  """Return configuration values.
3781 a8083063 Iustin Pop

3782 a8083063 Iustin Pop
  """
3783 a8083063 Iustin Pop
  _OP_REQP = []
3784 642339cf Guido Trotter
  REQ_BGL = False
3785 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
3786 05e50653 Michael Hanselmann
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
3787 05e50653 Michael Hanselmann
                                  "watcher_pause")
3788 642339cf Guido Trotter
3789 642339cf Guido Trotter
  def ExpandNames(self):
3790 642339cf Guido Trotter
    self.needed_locks = {}
3791 a8083063 Iustin Pop
3792 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3793 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3794 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
3795 ae5849b5 Michael Hanselmann
3796 a8083063 Iustin Pop
  def CheckPrereq(self):
3797 a8083063 Iustin Pop
    """No prerequisites.
3798 a8083063 Iustin Pop

3799 a8083063 Iustin Pop
    """
3800 a8083063 Iustin Pop
    pass
3801 a8083063 Iustin Pop
3802 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3803 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
3804 a8083063 Iustin Pop

3805 a8083063 Iustin Pop
    """
3806 ae5849b5 Michael Hanselmann
    values = []
3807 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
3808 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
3809 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
3810 ae5849b5 Michael Hanselmann
      elif field == "master_node":
3811 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
3812 3ccafd0e Iustin Pop
      elif field == "drain_flag":
3813 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
3814 05e50653 Michael Hanselmann
      elif field == "watcher_pause":
3815 cac599f1 Michael Hanselmann
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
3816 ae5849b5 Michael Hanselmann
      else:
3817 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
3818 3ccafd0e Iustin Pop
      values.append(entry)
3819 ae5849b5 Michael Hanselmann
    return values
3820 a8083063 Iustin Pop
3821 a8083063 Iustin Pop
3822 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
3823 a8083063 Iustin Pop
  """Bring up an instance's disks.
3824 a8083063 Iustin Pop

3825 a8083063 Iustin Pop
  """
3826 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3827 f22a8ba3 Guido Trotter
  REQ_BGL = False
3828 f22a8ba3 Guido Trotter
3829 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3830 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3831 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3832 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3833 f22a8ba3 Guido Trotter
3834 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3835 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3836 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3837 a8083063 Iustin Pop
3838 a8083063 Iustin Pop
  def CheckPrereq(self):
3839 a8083063 Iustin Pop
    """Check prerequisites.
3840 a8083063 Iustin Pop

3841 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3842 a8083063 Iustin Pop

3843 a8083063 Iustin Pop
    """
3844 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3845 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3846 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3847 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3848 b4ec07f8 Iustin Pop
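    # the ignore_size parameter is optional, so default it when not given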
    if not hasattr(self.op, "ignore_size"):
3849 b4ec07f8 Iustin Pop
      self.op.ignore_size = False
3850 a8083063 Iustin Pop
3851 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3852 a8083063 Iustin Pop
    """Activate the disks.
3853 a8083063 Iustin Pop

3854 a8083063 Iustin Pop
    """
3855 b4ec07f8 Iustin Pop
    disks_ok, disks_info = \
3856 b4ec07f8 Iustin Pop
              _AssembleInstanceDisks(self, self.instance,
3857 b4ec07f8 Iustin Pop
                                     ignore_size=self.op.ignore_size)
3858 a8083063 Iustin Pop
    if not disks_ok:
3859 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
3860 a8083063 Iustin Pop
3861 a8083063 Iustin Pop
    return disks_info
3862 a8083063 Iustin Pop
3863 a8083063 Iustin Pop
3864 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
3865 e3443b36 Iustin Pop
                           ignore_size=False):
3866 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
3867 a8083063 Iustin Pop

3868 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
3869 a8083063 Iustin Pop

3870 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
3871 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
3872 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
3873 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
3874 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
3875 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
3876 e4376078 Iustin Pop
      won't result in an error return from the function
3877 e3443b36 Iustin Pop
  @type ignore_size: boolean
3878 e3443b36 Iustin Pop
  @param ignore_size: if true, the current known size of the disk
3879 e3443b36 Iustin Pop
      will not be used during the disk activation, useful for cases
3880 e3443b36 Iustin Pop
      when the size is wrong
3881 e4376078 Iustin Pop
  @return: a tuple of (disks_ok, device_info), where device_info is a
3882 e4376078 Iustin Pop
      list of (host, instance_visible_name, node_visible_name) tuples
3883 e4376078 Iustin Pop
      with the mapping from node devices to instance devices
3884 a8083063 Iustin Pop

3885 a8083063 Iustin Pop
  """
3886 a8083063 Iustin Pop
  device_info = []
3887 a8083063 Iustin Pop
  disks_ok = True
3888 fdbd668d Iustin Pop
  iname = instance.name
3889 fdbd668d Iustin Pop
  # With the two passes mechanism we try to reduce the window of
3890 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
3891 fdbd668d Iustin Pop
  # before handshaking occured, but we do not eliminate it
3892 fdbd668d Iustin Pop
3893 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
3894 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
3895 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
3896 fdbd668d Iustin Pop
  # SyncSource, etc.)
3897 fdbd668d Iustin Pop
3898 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
3899 a8083063 Iustin Pop
  for inst_disk in instance.disks:
3900 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3901 e3443b36 Iustin Pop
      if ignore_size:
3902 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3903 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3904 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3905 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
3906 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3907 53c14ef1 Iustin Pop
      if msg:
3908 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3909 53c14ef1 Iustin Pop
                           " (is_primary=False, pass=1): %s",
3910 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3911 fdbd668d Iustin Pop
        if not ignore_secondaries:
3912 a8083063 Iustin Pop
          disks_ok = False
3913 fdbd668d Iustin Pop
3914 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
3915 fdbd668d Iustin Pop
3916 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
3917 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
3918 d52ea991 Michael Hanselmann
    dev_path = None
3919 d52ea991 Michael Hanselmann
3920 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
3921 fdbd668d Iustin Pop
      if node != instance.primary_node:
3922 fdbd668d Iustin Pop
        continue
3923 e3443b36 Iustin Pop
      if ignore_size:
3924 e3443b36 Iustin Pop
        node_disk = node_disk.Copy()
3925 e3443b36 Iustin Pop
        node_disk.UnsetSize()
3926 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
3927 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
3928 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3929 53c14ef1 Iustin Pop
      if msg:
3930 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
3931 53c14ef1 Iustin Pop
                           " (is_primary=True, pass=2): %s",
3932 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
3933 fdbd668d Iustin Pop
        disks_ok = False
3934 d52ea991 Michael Hanselmann
      else:
3935 d52ea991 Michael Hanselmann
        dev_path = result.payload
3936 d52ea991 Michael Hanselmann
3937 d52ea991 Michael Hanselmann
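    # note: dev_path stays None if assembling on the primary node failed above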
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))
3938 a8083063 Iustin Pop
3939 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
3940 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
3941 b352ab5b Iustin Pop
  # improving the logical/physical id handling
3942 b352ab5b Iustin Pop
  for disk in instance.disks:
3943 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
3944 b352ab5b Iustin Pop
3945 a8083063 Iustin Pop
  return disks_ok, device_info
3946 a8083063 Iustin Pop
3947 a8083063 Iustin Pop
3948 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
3949 3ecf6786 Iustin Pop
  """Start the disks of an instance.
3950 3ecf6786 Iustin Pop

3951 3ecf6786 Iustin Pop
  """
3952 7c4d6c7b Michael Hanselmann
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
3953 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
3954 fe7b0351 Michael Hanselmann
  if not disks_ok:
3955 b9bddb6b Iustin Pop
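    # assembly failed: tear down whatever was brought up before raising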
    _ShutdownInstanceDisks(lu, instance)
3956 fe7b0351 Michael Hanselmann
    if force is not None and not force:
3957 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
3958 86d9d3bb Iustin Pop
                         " secondary node,"
3959 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
3960 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
3961 fe7b0351 Michael Hanselmann
3962 fe7b0351 Michael Hanselmann
3963 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
3964 a8083063 Iustin Pop
  """Shutdown an instance's disks.
3965 a8083063 Iustin Pop

3966 a8083063 Iustin Pop
  """
3967 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3968 f22a8ba3 Guido Trotter
  REQ_BGL = False
3969 f22a8ba3 Guido Trotter
3970 f22a8ba3 Guido Trotter
  def ExpandNames(self):
3971 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
3972 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3973 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3974 f22a8ba3 Guido Trotter
3975 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
3976 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
3977 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
3978 a8083063 Iustin Pop
3979 a8083063 Iustin Pop
  def CheckPrereq(self):
3980 a8083063 Iustin Pop
    """Check prerequisites.
3981 a8083063 Iustin Pop

3982 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3983 a8083063 Iustin Pop

3984 a8083063 Iustin Pop
    """
3985 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3986 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
3987 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3988 a8083063 Iustin Pop
3989 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3990 a8083063 Iustin Pop
    """Deactivate the disks
3991 a8083063 Iustin Pop

3992 a8083063 Iustin Pop
    """
3993 a8083063 Iustin Pop
    instance = self.instance
3994 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
3995 a8083063 Iustin Pop
3996 a8083063 Iustin Pop
3997 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
3998 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
3999 155d6c75 Guido Trotter

4000 155d6c75 Guido Trotter
  This function checks that the instance is not running before calling
4001 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
4002 155d6c75 Guido Trotter

4003 155d6c75 Guido Trotter
  """
4004 31624382 Iustin Pop
  _CheckInstanceDown(lu, instance, "cannot shutdown disks")
4005 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
4006 a8083063 Iustin Pop
4007 a8083063 Iustin Pop
4008 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
4009 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
4010 a8083063 Iustin Pop

4011 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
4012 a8083063 Iustin Pop

4013 a8083063 Iustin Pop
  If ignore_primary is false, errors on the primary node are not
4014 a8083063 Iustin Pop
  ignored; they make the function report failure.
4015 a8083063 Iustin Pop

4016 a8083063 Iustin Pop
  """
4017 cacfd1fd Iustin Pop
  all_result = True
4018 a8083063 Iustin Pop
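  # shut down the top-level device of each disk on every node in its tree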
  for disk in instance.disks:
4019 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
4020 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
4021 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
4022 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4023 cacfd1fd Iustin Pop
      if msg:
4024 cacfd1fd Iustin Pop
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
4025 cacfd1fd Iustin Pop
                      disk.iv_name, node, msg)
4026 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
4027 cacfd1fd Iustin Pop
          all_result = False
4028 cacfd1fd Iustin Pop
  return all_result
4029 a8083063 Iustin Pop
4030 a8083063 Iustin Pop
4031 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
4032 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
4033 d4f16fd9 Iustin Pop

4034 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
4035 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
4036 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
4037 d4f16fd9 Iustin Pop
  exception.
4038 d4f16fd9 Iustin Pop

4039 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
4040 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
4041 e69d05fd Iustin Pop
  @type node: C{str}
4042 e69d05fd Iustin Pop
  @param node: the node to check
4043 e69d05fd Iustin Pop
  @type reason: C{str}
4044 e69d05fd Iustin Pop
  @param reason: string to use in the error message
4045 e69d05fd Iustin Pop
  @type requested: C{int}
4046 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
4047 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
4048 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
4049 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
4050 e69d05fd Iustin Pop
      we cannot check the node
4051 d4f16fd9 Iustin Pop

4052 d4f16fd9 Iustin Pop
  """
4053 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
4054 045dd6d9 Iustin Pop
  nodeinfo[node].Raise("Can't get data from node %s" % node,
4055 045dd6d9 Iustin Pop
                       prereq=True, ecode=errors.ECODE_ENVIRON)
4056 070e998b Iustin Pop
  free_mem = nodeinfo[node].payload.get('memory_free', None)
4057 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
4058 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
4059 5c983ee5 Iustin Pop
                               " was '%s'" % (node, free_mem),
4060 5c983ee5 Iustin Pop
                               errors.ECODE_ENVIRON)
4061 d4f16fd9 Iustin Pop
  if requested > free_mem:
4062 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
4063 070e998b Iustin Pop
                               " needed %s MiB, available %s MiB" %
4064 5c983ee5 Iustin Pop
                               (node, reason, requested, free_mem),
4065 5c983ee5 Iustin Pop
                               errors.ECODE_NORES)
4066 d4f16fd9 Iustin Pop
4067 d4f16fd9 Iustin Pop
4068 701384a9 Iustin Pop
def _CheckNodesFreeDisk(lu, nodenames, requested):
4069 701384a9 Iustin Pop
  """Checks if nodes have enough free disk space in the default VG.
4070 701384a9 Iustin Pop

4071 701384a9 Iustin Pop
  This function checks if all given nodes have the needed amount of
4072 701384a9 Iustin Pop
  free disk. In case any node has less disk or we cannot get the
4073 701384a9 Iustin Pop
  information from the node, this function raises an OpPrereqError
4074 701384a9 Iustin Pop
  exception.
4075 701384a9 Iustin Pop

4076 701384a9 Iustin Pop
  @type lu: C{LogicalUnit}
4077 701384a9 Iustin Pop
  @param lu: a logical unit from which we get configuration data
4078 701384a9 Iustin Pop
  @type nodenames: C{list}
4079 3a488770 Iustin Pop
  @param nodenames: the list of node names to check
4080 701384a9 Iustin Pop
  @type requested: C{int}
4081 701384a9 Iustin Pop
  @param requested: the amount of disk in MiB to check for
4082 701384a9 Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough disk, or
4083 701384a9 Iustin Pop
      we cannot check the node
4084 701384a9 Iustin Pop

4085 701384a9 Iustin Pop
  """
4086 701384a9 Iustin Pop
  nodeinfo = lu.rpc.call_node_info(nodenames, lu.cfg.GetVGName(),
4087 701384a9 Iustin Pop
                                   lu.cfg.GetHypervisorType())
4088 701384a9 Iustin Pop
  for node in nodenames:
4089 701384a9 Iustin Pop
    info = nodeinfo[node]
4090 701384a9 Iustin Pop
    info.Raise("Cannot get current information from node %s" % node,
4091 701384a9 Iustin Pop
               prereq=True, ecode=errors.ECODE_ENVIRON)
4092 701384a9 Iustin Pop
    vg_free = info.payload.get("vg_free", None)
4093 701384a9 Iustin Pop
    if not isinstance(vg_free, int):
4094 701384a9 Iustin Pop
      raise errors.OpPrereqError("Can't compute free disk space on node %s,"
4095 701384a9 Iustin Pop
                                 " result was '%s'" % (node, vg_free),
4096 701384a9 Iustin Pop
                                 errors.ECODE_ENVIRON)
4097 701384a9 Iustin Pop
    if requested > vg_free:
4098 701384a9 Iustin Pop
      raise errors.OpPrereqError("Not enough disk space on target node %s:"
4099 701384a9 Iustin Pop
                                 " required %d MiB, available %d MiB" %
4100 701384a9 Iustin Pop
                                 (node, requested, vg_free),
4101 701384a9 Iustin Pop
                                 errors.ECODE_NORES)
4102 701384a9 Iustin Pop
4103 701384a9 Iustin Pop
4104 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
4105 a8083063 Iustin Pop
  """Starts an instance.
4106 a8083063 Iustin Pop

4107 a8083063 Iustin Pop
  """
4108 a8083063 Iustin Pop
  HPATH = "instance-start"
4109 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4110 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
4111 e873317a Guido Trotter
  REQ_BGL = False
4112 e873317a Guido Trotter
4113 e873317a Guido Trotter
  def ExpandNames(self):
4114 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4115 a8083063 Iustin Pop
4116 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4117 a8083063 Iustin Pop
    """Build hooks env.
4118 a8083063 Iustin Pop

4119 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4120 a8083063 Iustin Pop

4121 a8083063 Iustin Pop
    """
4122 a8083063 Iustin Pop
    env = {
4123 a8083063 Iustin Pop
      "FORCE": self.op.force,
4124 a8083063 Iustin Pop
      }
4125 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4126 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4127 a8083063 Iustin Pop
    return env, nl, nl
4128 a8083063 Iustin Pop
4129 a8083063 Iustin Pop
  def CheckPrereq(self):
4130 a8083063 Iustin Pop
    """Check prerequisites.
4131 a8083063 Iustin Pop

4132 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4133 a8083063 Iustin Pop

4134 a8083063 Iustin Pop
    """
4135 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4136 e873317a Guido Trotter
    assert self.instance is not None, \
4137 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4138 a8083063 Iustin Pop
4139 d04aaa2f Iustin Pop
    # extra beparams
4140 d04aaa2f Iustin Pop
    self.beparams = getattr(self.op, "beparams", {})
4141 d04aaa2f Iustin Pop
    if self.beparams:
4142 d04aaa2f Iustin Pop
      if not isinstance(self.beparams, dict):
4143 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
4144 5c983ee5 Iustin Pop
                                   " dict" % (type(self.beparams), ),
4145 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
4146 d04aaa2f Iustin Pop
      # fill the beparams dict
4147 d04aaa2f Iustin Pop
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
4148 d04aaa2f Iustin Pop
      self.op.beparams = self.beparams
4149 d04aaa2f Iustin Pop
4150 d04aaa2f Iustin Pop
    # extra hvparams
4151 d04aaa2f Iustin Pop
    self.hvparams = getattr(self.op, "hvparams", {})
4152 d04aaa2f Iustin Pop
    if self.hvparams:
4153 d04aaa2f Iustin Pop
      if not isinstance(self.hvparams, dict):
4154 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
4155 5c983ee5 Iustin Pop
                                   " dict" % (type(self.hvparams), ),
4156 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
4157 d04aaa2f Iustin Pop
4158 d04aaa2f Iustin Pop
      # check hypervisor parameter syntax (locally)
4159 d04aaa2f Iustin Pop
      cluster = self.cfg.GetClusterInfo()
4160 d04aaa2f Iustin Pop
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
4161 abe609b2 Guido Trotter
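      # layer the values: cluster defaults for this hypervisor, then the
      # instance's own hvparams, then the overrides passed with this request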
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
4162 d04aaa2f Iustin Pop
                                    instance.hvparams)
4163 d04aaa2f Iustin Pop
      filled_hvp.update(self.hvparams)
4164 d04aaa2f Iustin Pop
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
4165 d04aaa2f Iustin Pop
      hv_type.CheckParameterSyntax(filled_hvp)
4166 d04aaa2f Iustin Pop
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
4167 d04aaa2f Iustin Pop
      self.op.hvparams = self.hvparams
4168 d04aaa2f Iustin Pop
4169 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4170 7527a8a4 Iustin Pop
4171 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
4172 5bbd3f7f Michael Hanselmann
    # check bridges existence
4173 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
4174 a8083063 Iustin Pop
4175 f1926756 Guido Trotter
    remote_info = self.rpc.call_instance_info(instance.primary_node,
4176 f1926756 Guido Trotter
                                              instance.name,
4177 f1926756 Guido Trotter
                                              instance.hypervisor)
4178 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
4179 045dd6d9 Iustin Pop
                      prereq=True, ecode=errors.ECODE_ENVIRON)
4180 7ad1af4a Iustin Pop
    if not remote_info.payload: # not running already
4181 f1926756 Guido Trotter
      _CheckNodeFreeMemory(self, instance.primary_node,
4182 f1926756 Guido Trotter
                           "starting instance %s" % instance.name,
4183 f1926756 Guido Trotter
                           bep[constants.BE_MEMORY], instance.hypervisor)
4184 d4f16fd9 Iustin Pop
4185 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4186 a8083063 Iustin Pop
    """Start the instance.
4187 a8083063 Iustin Pop

4188 a8083063 Iustin Pop
    """
4189 a8083063 Iustin Pop
    instance = self.instance
4190 a8083063 Iustin Pop
    force = self.op.force
4191 a8083063 Iustin Pop
4192 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
4193 fe482621 Iustin Pop
4194 a8083063 Iustin Pop
    node_current = instance.primary_node
4195 a8083063 Iustin Pop
4196 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
4197 a8083063 Iustin Pop
4198 d04aaa2f Iustin Pop
    result = self.rpc.call_instance_start(node_current, instance,
4199 d04aaa2f Iustin Pop
                                          self.hvparams, self.beparams)
4200 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4201 dd279568 Iustin Pop
    if msg:
4202 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
4203 dd279568 Iustin Pop
      raise errors.OpExecError("Could not start instance: %s" % msg)
4204 a8083063 Iustin Pop
4205 a8083063 Iustin Pop
4206 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
4207 bf6929a2 Alexander Schreiber
  """Reboot an instance.
4208 bf6929a2 Alexander Schreiber

4209 bf6929a2 Alexander Schreiber
  """
4210 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
4211 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
4212 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
4213 e873317a Guido Trotter
  REQ_BGL = False
4214 e873317a Guido Trotter
4215 17c3f802 Guido Trotter
  def CheckArguments(self):
4216 17c3f802 Guido Trotter
    """Check the arguments.
4217 17c3f802 Guido Trotter

4218 17c3f802 Guido Trotter
    """
4219 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4220 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4221 17c3f802 Guido Trotter
4222 e873317a Guido Trotter
  def ExpandNames(self):
4223 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
4224 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
4225 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
4226 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
4227 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
4228 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
4229 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
4230 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4231 bf6929a2 Alexander Schreiber
4232 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
4233 bf6929a2 Alexander Schreiber
    """Build hooks env.
4234 bf6929a2 Alexander Schreiber

4235 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
4236 bf6929a2 Alexander Schreiber

4237 bf6929a2 Alexander Schreiber
    """
4238 bf6929a2 Alexander Schreiber
    env = {
4239 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
4240 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
4241 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4242 bf6929a2 Alexander Schreiber
      }
4243 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
4244 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4245 bf6929a2 Alexander Schreiber
    return env, nl, nl
4246 bf6929a2 Alexander Schreiber
4247 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
4248 bf6929a2 Alexander Schreiber
    """Check prerequisites.
4249 bf6929a2 Alexander Schreiber

4250 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
4251 bf6929a2 Alexander Schreiber

4252 bf6929a2 Alexander Schreiber
    """
4253 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4254 e873317a Guido Trotter
    assert self.instance is not None, \
4255 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4256 bf6929a2 Alexander Schreiber
4257 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4258 7527a8a4 Iustin Pop
4259 5bbd3f7f Michael Hanselmann
    # check bridges existence
4260 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
4261 bf6929a2 Alexander Schreiber
4262 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
4263 bf6929a2 Alexander Schreiber
    """Reboot the instance.
4264 bf6929a2 Alexander Schreiber

4265 bf6929a2 Alexander Schreiber
    """
4266 bf6929a2 Alexander Schreiber
    instance = self.instance
4267 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
4268 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
4269 bf6929a2 Alexander Schreiber
4270 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
4271 bf6929a2 Alexander Schreiber
4272 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
4273 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
4274 ae48ac32 Iustin Pop
      for disk in instance.disks:
4275 ae48ac32 Iustin Pop
        self.cfg.SetDiskID(disk, node_current)
4276 781de953 Iustin Pop
      result = self.rpc.call_instance_reboot(node_current, instance,
4277 17c3f802 Guido Trotter
                                             reboot_type,
4278 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4279 4c4e4e1e Iustin Pop
      result.Raise("Could not reboot instance")
4280 bf6929a2 Alexander Schreiber
    else:
4281 17c3f802 Guido Trotter
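      # full reboot: shut the instance down, restart its disks, start it again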
      result = self.rpc.call_instance_shutdown(node_current, instance,
4282 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
4283 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance for full reboot")
4284 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
4285 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
4286 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(node_current, instance, None, None)
4287 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4288 dd279568 Iustin Pop
      if msg:
4289 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
4290 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance for"
4291 dd279568 Iustin Pop
                                 " full reboot: %s" % msg)
4292 bf6929a2 Alexander Schreiber
4293 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
4294 bf6929a2 Alexander Schreiber
4295 bf6929a2 Alexander Schreiber
4296 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
4297 a8083063 Iustin Pop
  """Shutdown an instance.
4298 a8083063 Iustin Pop

4299 a8083063 Iustin Pop
  """
4300 a8083063 Iustin Pop
  HPATH = "instance-stop"
4301 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4302 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4303 e873317a Guido Trotter
  REQ_BGL = False
4304 e873317a Guido Trotter
4305 6263189c Guido Trotter
  def CheckArguments(self):
4306 6263189c Guido Trotter
    """Check the arguments.
4307 6263189c Guido Trotter

4308 6263189c Guido Trotter
    """
4309 6263189c Guido Trotter
    self.timeout = getattr(self.op, "timeout",
4310 6263189c Guido Trotter
                           constants.DEFAULT_SHUTDOWN_TIMEOUT)
4311 6263189c Guido Trotter
4312 e873317a Guido Trotter
  def ExpandNames(self):
4313 e873317a Guido Trotter
    self._ExpandAndLockInstance()
4314 a8083063 Iustin Pop
4315 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4316 a8083063 Iustin Pop
    """Build hooks env.
4317 a8083063 Iustin Pop

4318 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4319 a8083063 Iustin Pop

4320 a8083063 Iustin Pop
    """
4321 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4322 6263189c Guido Trotter
    env["TIMEOUT"] = self.timeout
4323 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4324 a8083063 Iustin Pop
    return env, nl, nl
4325 a8083063 Iustin Pop
4326 a8083063 Iustin Pop
  def CheckPrereq(self):
4327 a8083063 Iustin Pop
    """Check prerequisites.
4328 a8083063 Iustin Pop

4329 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4330 a8083063 Iustin Pop

4331 a8083063 Iustin Pop
    """
4332 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4333 e873317a Guido Trotter
    assert self.instance is not None, \
4334 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4335 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
4336 a8083063 Iustin Pop
4337 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4338 a8083063 Iustin Pop
    """Shutdown the instance.
4339 a8083063 Iustin Pop

4340 a8083063 Iustin Pop
    """
4341 a8083063 Iustin Pop
    instance = self.instance
4342 a8083063 Iustin Pop
    node_current = instance.primary_node
4343 6263189c Guido Trotter
    timeout = self.timeout
4344 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
4345 6263189c Guido Trotter
    result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
4346 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4347 1fae010f Iustin Pop
    if msg:
4348 1fae010f Iustin Pop
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
4349 a8083063 Iustin Pop
4350 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
4351 a8083063 Iustin Pop
4352 a8083063 Iustin Pop
4353 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
4354 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
4355 fe7b0351 Michael Hanselmann

4356 fe7b0351 Michael Hanselmann
  """
4357 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
4358 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
4359 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
4360 4e0b4d2d Guido Trotter
  REQ_BGL = False
4361 4e0b4d2d Guido Trotter
4362 4e0b4d2d Guido Trotter
  def ExpandNames(self):
4363 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
4364 fe7b0351 Michael Hanselmann
4365 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
4366 fe7b0351 Michael Hanselmann
    """Build hooks env.
4367 fe7b0351 Michael Hanselmann

4368 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
4369 fe7b0351 Michael Hanselmann

4370 fe7b0351 Michael Hanselmann
    """
4371 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4372 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4373 fe7b0351 Michael Hanselmann
    return env, nl, nl
4374 fe7b0351 Michael Hanselmann
4375 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
4376 fe7b0351 Michael Hanselmann
    """Check prerequisites.
4377 fe7b0351 Michael Hanselmann

4378 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
4379 fe7b0351 Michael Hanselmann

4380 fe7b0351 Michael Hanselmann
    """
4381 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4382 4e0b4d2d Guido Trotter
    assert instance is not None, \
4383 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4384 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4385 4e0b4d2d Guido Trotter
4386 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
4387 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4388 5c983ee5 Iustin Pop
                                 self.op.instance_name,
4389 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
4390 31624382 Iustin Pop
    _CheckInstanceDown(self, instance, "cannot reinstall")
4391 d0834de3 Michael Hanselmann
4392 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
4393 f2c05717 Guido Trotter
    self.op.force_variant = getattr(self.op, "force_variant", False)
4394 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
4395 d0834de3 Michael Hanselmann
      # OS verification
4396 cf26a87a Iustin Pop
      pnode = _ExpandNodeName(self.cfg, instance.primary_node)
4397 231cd901 Iustin Pop
      _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
4398 d0834de3 Michael Hanselmann
4399 fe7b0351 Michael Hanselmann
    self.instance = instance
4400 fe7b0351 Michael Hanselmann
4401 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
4402 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
4403 fe7b0351 Michael Hanselmann

4404 fe7b0351 Michael Hanselmann
    """
4405 fe7b0351 Michael Hanselmann
    inst = self.instance
4406 fe7b0351 Michael Hanselmann
4407 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
4408 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
4409 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
4410 a4eae71f Michael Hanselmann
      self.cfg.Update(inst, feedback_fn)
4411 d0834de3 Michael Hanselmann
4412 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
4413 fe7b0351 Michael Hanselmann
    try:
4414 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
4415 4a0e011f Iustin Pop
      # FIXME: pass debug option from opcode to backend
4416 dd713605 Iustin Pop
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True,
4417 dd713605 Iustin Pop
                                             self.op.debug_level)
4418 4c4e4e1e Iustin Pop
      result.Raise("Could not install OS for instance %s on node %s" %
4419 4c4e4e1e Iustin Pop
                   (inst.name, inst.primary_node))
4420 fe7b0351 Michael Hanselmann
    finally:
4421 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
4422 fe7b0351 Michael Hanselmann
4423 fe7b0351 Michael Hanselmann
4424 bd315bfa Iustin Pop
class LURecreateInstanceDisks(LogicalUnit):
4425 bd315bfa Iustin Pop
  """Recreate an instance's missing disks.
4426 bd315bfa Iustin Pop

4427 bd315bfa Iustin Pop
  """
4428 bd315bfa Iustin Pop
  HPATH = "instance-recreate-disks"
4429 bd315bfa Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4430 bd315bfa Iustin Pop
  _OP_REQP = ["instance_name", "disks"]
4431 bd315bfa Iustin Pop
  REQ_BGL = False
4432 bd315bfa Iustin Pop
4433 bd315bfa Iustin Pop
  def CheckArguments(self):
4434 bd315bfa Iustin Pop
    """Check the arguments.
4435 bd315bfa Iustin Pop

4436 bd315bfa Iustin Pop
    """
4437 bd315bfa Iustin Pop
    if not isinstance(self.op.disks, list):
4438 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid disks parameter", errors.ECODE_INVAL)
4439 bd315bfa Iustin Pop
    for item in self.op.disks:
4440 bd315bfa Iustin Pop
      if (not isinstance(item, int) or
4441 bd315bfa Iustin Pop
          item < 0):
4442 bd315bfa Iustin Pop
        raise errors.OpPrereqError("Invalid disk specification '%s'" %
4443 5c983ee5 Iustin Pop
                                   str(item), errors.ECODE_INVAL)
4444 bd315bfa Iustin Pop
4445 bd315bfa Iustin Pop
  def ExpandNames(self):
4446 bd315bfa Iustin Pop
    self._ExpandAndLockInstance()
4447 bd315bfa Iustin Pop
4448 bd315bfa Iustin Pop
  def BuildHooksEnv(self):
4449 bd315bfa Iustin Pop
    """Build hooks env.
4450 bd315bfa Iustin Pop

4451 bd315bfa Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4452 bd315bfa Iustin Pop

4453 bd315bfa Iustin Pop
    """
4454 bd315bfa Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4455 bd315bfa Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4456 bd315bfa Iustin Pop
    return env, nl, nl
4457 bd315bfa Iustin Pop
4458 bd315bfa Iustin Pop
  def CheckPrereq(self):
4459 bd315bfa Iustin Pop
    """Check prerequisites.
4460 bd315bfa Iustin Pop

4461 bd315bfa Iustin Pop
    This checks that the instance is in the cluster and is not running.
4462 bd315bfa Iustin Pop

4463 bd315bfa Iustin Pop
    """
4464 bd315bfa Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4465 bd315bfa Iustin Pop
    assert instance is not None, \
4466 bd315bfa Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
4467 bd315bfa Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4468 bd315bfa Iustin Pop
4469 bd315bfa Iustin Pop
    if instance.disk_template == constants.DT_DISKLESS:
4470 bd315bfa Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
4471 5c983ee5 Iustin Pop
                                 self.op.instance_name, errors.ECODE_INVAL)
4472 31624382 Iustin Pop
    _CheckInstanceDown(self, instance, "cannot recreate disks")
4473 bd315bfa Iustin Pop
4474 bd315bfa Iustin Pop
    if not self.op.disks:
4475 bd315bfa Iustin Pop
      self.op.disks = range(len(instance.disks))
4476 bd315bfa Iustin Pop
    else:
4477 bd315bfa Iustin Pop
      for idx in self.op.disks:
4478 bd315bfa Iustin Pop
        if idx >= len(instance.disks):
4479 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
4480 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
4481 bd315bfa Iustin Pop
4482 bd315bfa Iustin Pop
    self.instance = instance
4483 bd315bfa Iustin Pop
4484 bd315bfa Iustin Pop
  def Exec(self, feedback_fn):
4485 bd315bfa Iustin Pop
    """Recreate the disks.
4486 bd315bfa Iustin Pop

4487 bd315bfa Iustin Pop
    """
4488 bd315bfa Iustin Pop
    to_skip = []
4489 1122eb25 Iustin Pop
    for idx, _ in enumerate(self.instance.disks):
4490 bd315bfa Iustin Pop
      if idx not in self.op.disks: # disk idx has not been passed in
4491 bd315bfa Iustin Pop
        to_skip.append(idx)
4492 bd315bfa Iustin Pop
        continue
4493 bd315bfa Iustin Pop
4494 bd315bfa Iustin Pop
    _CreateDisks(self, self.instance, to_skip=to_skip)
4495 bd315bfa Iustin Pop
4496 bd315bfa Iustin Pop
4497 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
4498 decd5f45 Iustin Pop
  """Rename an instance.
4499 decd5f45 Iustin Pop

4500 decd5f45 Iustin Pop
  """
4501 decd5f45 Iustin Pop
  HPATH = "instance-rename"
4502 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4503 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
4504 decd5f45 Iustin Pop
4505 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
4506 decd5f45 Iustin Pop
    """Build hooks env.
4507 decd5f45 Iustin Pop

4508 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4509 decd5f45 Iustin Pop

4510 decd5f45 Iustin Pop
    """
4511 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4512 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
4513 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
4514 decd5f45 Iustin Pop
    return env, nl, nl
4515 decd5f45 Iustin Pop
4516 decd5f45 Iustin Pop
  def CheckPrereq(self):
4517 decd5f45 Iustin Pop
    """Check prerequisites.
4518 decd5f45 Iustin Pop

4519 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
4520 decd5f45 Iustin Pop

4521 decd5f45 Iustin Pop
    """
4522 cf26a87a Iustin Pop
    self.op.instance_name = _ExpandInstanceName(self.cfg,
4523 cf26a87a Iustin Pop
                                                self.op.instance_name)
4524 cf26a87a Iustin Pop
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4525 cf26a87a Iustin Pop
    assert instance is not None
4526 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
4527 31624382 Iustin Pop
    _CheckInstanceDown(self, instance, "cannot rename")
4528 decd5f45 Iustin Pop
    self.instance = instance
4529 decd5f45 Iustin Pop
4530 decd5f45 Iustin Pop
    # new name verification
4531 104f4ca1 Iustin Pop
    name_info = utils.GetHostInfo(self.op.new_name)
4532 decd5f45 Iustin Pop
4533 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
4534 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
4535 7bde3275 Guido Trotter
    if new_name in instance_list:
4536 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4537 5c983ee5 Iustin Pop
                                 new_name, errors.ECODE_EXISTS)
4538 7bde3275 Guido Trotter
4539 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
4540 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
4541 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4542 5c983ee5 Iustin Pop
                                   (name_info.ip, new_name),
4543 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
4544 decd5f45 Iustin Pop
4545 decd5f45 Iustin Pop
4546 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
4547 decd5f45 Iustin Pop
    """Reinstall the instance.
4548 decd5f45 Iustin Pop

4549 decd5f45 Iustin Pop
    """
4550 decd5f45 Iustin Pop
    inst = self.instance
4551 decd5f45 Iustin Pop
    old_name = inst.name
4552 decd5f45 Iustin Pop
4553 b23c4333 Manuel Franceschini
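    # for file-based disks, remember the current backing directory so it can
    # be renamed on the primary node after the configuration rename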
    if inst.disk_template == constants.DT_FILE:
4554 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4555 b23c4333 Manuel Franceschini
4556 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
4557 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
4558 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
4559 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
4560 decd5f45 Iustin Pop
4561 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
4562 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
4563 decd5f45 Iustin Pop
4564 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
4565 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
4566 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
4567 72737a7f Iustin Pop
                                                     old_file_storage_dir,
4568 72737a7f Iustin Pop
                                                     new_file_storage_dir)
4569 4c4e4e1e Iustin Pop
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
4570 4c4e4e1e Iustin Pop
                   " (but the instance has been renamed in Ganeti)" %
4571 4c4e4e1e Iustin Pop
                   (inst.primary_node, old_file_storage_dir,
4572 4c4e4e1e Iustin Pop
                    new_file_storage_dir))
4573 b23c4333 Manuel Franceschini
4574 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
4575 decd5f45 Iustin Pop
    try:
4576 781de953 Iustin Pop
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
4577 dd713605 Iustin Pop
                                                 old_name, self.op.debug_level)
4578 4c4e4e1e Iustin Pop
      msg = result.fail_msg
4579 96841384 Iustin Pop
      if msg:
4580 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
4581 96841384 Iustin Pop
               " (but the instance has been renamed in Ganeti): %s" %
4582 96841384 Iustin Pop
               (inst.name, inst.primary_node, msg))
4583 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
4584 decd5f45 Iustin Pop
    finally:
4585 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
4586 decd5f45 Iustin Pop
4587 decd5f45 Iustin Pop
4588 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
4589 a8083063 Iustin Pop
  """Remove an instance.
4590 a8083063 Iustin Pop

4591 a8083063 Iustin Pop
  """
4592 a8083063 Iustin Pop
  HPATH = "instance-remove"
4593 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4594 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
4595 cf472233 Guido Trotter
  REQ_BGL = False
4596 cf472233 Guido Trotter
4597 17c3f802 Guido Trotter
  def CheckArguments(self):
4598 17c3f802 Guido Trotter
    """Check the arguments.
4599 17c3f802 Guido Trotter

4600 17c3f802 Guido Trotter
    """
4601 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4602 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4603 17c3f802 Guido Trotter
4604 cf472233 Guido Trotter
  def ExpandNames(self):
4605 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
4606 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4607 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4608 cf472233 Guido Trotter
4609 cf472233 Guido Trotter
  def DeclareLocks(self, level):
4610 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
4611 cf472233 Guido Trotter
      self._LockInstancesNodes()
4612 a8083063 Iustin Pop
4613 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4614 a8083063 Iustin Pop
    """Build hooks env.
4615 a8083063 Iustin Pop

4616 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4617 a8083063 Iustin Pop

4618 a8083063 Iustin Pop
    """
4619 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
4620 17c3f802 Guido Trotter
    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
4621 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
4622 abd8e836 Iustin Pop
    nl_post = list(self.instance.all_nodes) + nl
4623 abd8e836 Iustin Pop
    return env, nl, nl_post
4624 a8083063 Iustin Pop
4625 a8083063 Iustin Pop
  def CheckPrereq(self):
4626 a8083063 Iustin Pop
    """Check prerequisites.
4627 a8083063 Iustin Pop

4628 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4629 a8083063 Iustin Pop

4630 a8083063 Iustin Pop
    """
4631 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4632 cf472233 Guido Trotter
    assert self.instance is not None, \
4633 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4634 a8083063 Iustin Pop
4635 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4636 a8083063 Iustin Pop
    """Remove the instance.
4637 a8083063 Iustin Pop

4638 a8083063 Iustin Pop
    """
4639 a8083063 Iustin Pop
    instance = self.instance
4640 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
4641 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
4642 a8083063 Iustin Pop
4643 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
4644 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
4645 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4646 1fae010f Iustin Pop
    if msg:
4647 1d67656e Iustin Pop
      if self.op.ignore_failures:
4648 1fae010f Iustin Pop
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
4649 1d67656e Iustin Pop
      else:
4650 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
4651 1fae010f Iustin Pop
                                 " node %s: %s" %
4652 1fae010f Iustin Pop
                                 (instance.name, instance.primary_node, msg))
4653 a8083063 Iustin Pop
4654 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
4655 a8083063 Iustin Pop
4656 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
4657 1d67656e Iustin Pop
      if self.op.ignore_failures:
4658 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
4659 1d67656e Iustin Pop
      else:
4660 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
4661 a8083063 Iustin Pop
4662 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
4663 a8083063 Iustin Pop
4664 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
4665 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
4666 a8083063 Iustin Pop
4667 a8083063 Iustin Pop
4668 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
4669 a8083063 Iustin Pop
  """Logical unit for querying instances.
4670 a8083063 Iustin Pop

4671 a8083063 Iustin Pop
  """
4672 7260cfbe Iustin Pop
  # pylint: disable-msg=W0142
4673 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
4674 7eb9d8f7 Guido Trotter
  REQ_BGL = False
4675 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
4676 19bed813 Iustin Pop
                    "serial_no", "ctime", "mtime", "uuid"]
4677 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
4678 5b460366 Iustin Pop
                                    "admin_state",
4679 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
4680 638c6349 Guido Trotter
                                    "nic_mode", "nic_link",
4681 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
4682 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
4683 8aec325c Iustin Pop
                                    r"(disk)\.(size)/([0-9]+)",
4684 8aec325c Iustin Pop
                                    r"(disk)\.(sizes)", "disk_usage",
4685 638c6349 Guido Trotter
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
4686 638c6349 Guido Trotter
                                    r"(nic)\.(bridge)/([0-9]+)",
4687 638c6349 Guido Trotter
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
4688 8aec325c Iustin Pop
                                    r"(disk|nic)\.(count)",
4689 19bed813 Iustin Pop
                                    "hvparams",
4690 19bed813 Iustin Pop
                                    ] + _SIMPLE_FIELDS +
4691 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
4692 7736a5f2 Iustin Pop
                                   for name in constants.HVS_PARAMETERS
4693 7736a5f2 Iustin Pop
                                   if name not in constants.HVC_GLOBALS] +
4694 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
4695 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
4696 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
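  # Descriptive note (added): the regular expressions above accept per-index
  # fields such as "disk.size/0" or "nic.mac/1", as well as aggregate list
  # fields such as "disk.sizes" and "nic.macs".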
4697 31bf511f Iustin Pop
4698 a8083063 Iustin Pop
4699 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
4700 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
4701 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
4702 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
4703 a8083063 Iustin Pop
4704 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
4705 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
4706 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
4707 7eb9d8f7 Guido Trotter
4708 57a2fb91 Iustin Pop
    if self.op.names:
4709 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
4710 7eb9d8f7 Guido Trotter
    else:
4711 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
4712 7eb9d8f7 Guido Trotter
4713 ec79568d Iustin Pop
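    # Live data (and hence the per-node RPC calls and node locking) is only
    # needed if at least one non-static field was requested; locking is
    # additionally gated on the use_locking flag.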
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
4714 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
4715 57a2fb91 Iustin Pop
    if self.do_locking:
4716 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4717 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
4718 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4719 7eb9d8f7 Guido Trotter
4720 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
4721 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
4722 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
4723 7eb9d8f7 Guido Trotter
4724 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
4725 7eb9d8f7 Guido Trotter
    """Check prerequisites.
4726 7eb9d8f7 Guido Trotter

4727 7eb9d8f7 Guido Trotter
    """
4728 57a2fb91 Iustin Pop
    pass
4729 069dcc86 Iustin Pop
4730 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4731 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
4732 a8083063 Iustin Pop

4733 a8083063 Iustin Pop
    """
4734 7260cfbe Iustin Pop
    # pylint: disable-msg=R0912
4735 7260cfbe Iustin Pop
    # way too many branches here
4736 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
4737 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
4738 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
4739 a7f5dc98 Iustin Pop
      if self.do_locking:
4740 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4741 a7f5dc98 Iustin Pop
      else:
4742 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
4743 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
4744 57a2fb91 Iustin Pop
    else:
4745 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
4746 a7f5dc98 Iustin Pop
      if self.do_locking:
4747 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
4748 a7f5dc98 Iustin Pop
      else:
4749 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
4750 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
4751 a7f5dc98 Iustin Pop
      if missing:
4752 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
4753 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
4754 a7f5dc98 Iustin Pop
      instance_names = self.wanted
4755 c1f1cbb2 Iustin Pop
4756 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
4757 a8083063 Iustin Pop
4758 a8083063 Iustin Pop
    # begin data gathering
4759 a8083063 Iustin Pop
4760 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
4761 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
4762 a8083063 Iustin Pop
4763 a8083063 Iustin Pop
    bad_nodes = []
4764 cbfc4681 Iustin Pop
    off_nodes = []
4765 ec79568d Iustin Pop
    if self.do_node_query:
4766 a8083063 Iustin Pop
      live_data = {}
4767 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
4768 a8083063 Iustin Pop
      for name in nodes:
4769 a8083063 Iustin Pop
        result = node_data[name]
4770 cbfc4681 Iustin Pop
        if result.offline:
4771 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
4772 cbfc4681 Iustin Pop
          off_nodes.append(name)
4773 3cebe102 Michael Hanselmann
        if result.fail_msg:
4774 a8083063 Iustin Pop
          bad_nodes.append(name)
4775 781de953 Iustin Pop
        else:
4776 2fa74ef4 Iustin Pop
          if result.payload:
4777 2fa74ef4 Iustin Pop
            live_data.update(result.payload)
4778 2fa74ef4 Iustin Pop
          # else no instance is alive
4779 a8083063 Iustin Pop
    else:
4780 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
4781 a8083063 Iustin Pop
4782 a8083063 Iustin Pop
    # end data gathering
4783 a8083063 Iustin Pop
4784 5018a335 Iustin Pop
    HVPREFIX = "hv/"
4785 338e51e8 Iustin Pop
    BEPREFIX = "be/"
4786 a8083063 Iustin Pop
    output = []
4787 638c6349 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
4788 a8083063 Iustin Pop
    for instance in instance_list:
4789 a8083063 Iustin Pop
      iout = []
4790 7736a5f2 Iustin Pop
      i_hv = cluster.FillHV(instance, skip_globals=True)
4791 638c6349 Guido Trotter
      i_be = cluster.FillBE(instance)
4792 638c6349 Guido Trotter
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4793 638c6349 Guido Trotter
                                 nic.nicparams) for nic in instance.nics]
4794 a8083063 Iustin Pop
      for field in self.op.output_fields:
4795 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
4796 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
4797 19bed813 Iustin Pop
          val = getattr(instance, field)
4798 a8083063 Iustin Pop
        elif field == "pnode":
4799 a8083063 Iustin Pop
          val = instance.primary_node
4800 a8083063 Iustin Pop
        elif field == "snodes":
4801 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
4802 a8083063 Iustin Pop
        elif field == "admin_state":
4803 0d68c45d Iustin Pop
          val = instance.admin_up
4804 a8083063 Iustin Pop
        elif field == "oper_state":
4805 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4806 8a23d2d3 Iustin Pop
            val = None
4807 a8083063 Iustin Pop
          else:
4808 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
4809 d8052456 Iustin Pop
        elif field == "status":
4810 cbfc4681 Iustin Pop
          if instance.primary_node in off_nodes:
4811 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
4812 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
4813 d8052456 Iustin Pop
            val = "ERROR_nodedown"
4814 d8052456 Iustin Pop
          else:
4815 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
4816 d8052456 Iustin Pop
            if running:
4817 0d68c45d Iustin Pop
              if instance.admin_up:
4818 d8052456 Iustin Pop
                val = "running"
4819 d8052456 Iustin Pop
              else:
4820 d8052456 Iustin Pop
                val = "ERROR_up"
4821 d8052456 Iustin Pop
            else:
4822 0d68c45d Iustin Pop
              if instance.admin_up:
4823 d8052456 Iustin Pop
                val = "ERROR_down"
4824 d8052456 Iustin Pop
              else:
4825 d8052456 Iustin Pop
                val = "ADMIN_down"
4826 a8083063 Iustin Pop
        elif field == "oper_ram":
4827 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4828 8a23d2d3 Iustin Pop
            val = None
4829 a8083063 Iustin Pop
          elif instance.name in live_data:
4830 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
4831 a8083063 Iustin Pop
          else:
4832 a8083063 Iustin Pop
            val = "-"
4833 c1ce76bb Iustin Pop
        elif field == "vcpus":
4834 c1ce76bb Iustin Pop
          val = i_be[constants.BE_VCPUS]
4835 a8083063 Iustin Pop
        elif field == "disk_template":
4836 a8083063 Iustin Pop
          val = instance.disk_template
4837 a8083063 Iustin Pop
        elif field == "ip":
4838 39a02558 Guido Trotter
          if instance.nics:
4839 39a02558 Guido Trotter
            val = instance.nics[0].ip
4840 39a02558 Guido Trotter
          else:
4841 39a02558 Guido Trotter
            val = None
4842 638c6349 Guido Trotter
        elif field == "nic_mode":
4843 638c6349 Guido Trotter
          if instance.nics:
4844 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_MODE]
4845 638c6349 Guido Trotter
          else:
4846 638c6349 Guido Trotter
            val = None
4847 638c6349 Guido Trotter
        elif field == "nic_link":
4848 39a02558 Guido Trotter
          if instance.nics:
4849 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4850 638c6349 Guido Trotter
          else:
4851 638c6349 Guido Trotter
            val = None
4852 638c6349 Guido Trotter
        elif field == "bridge":
4853 638c6349 Guido Trotter
          if (instance.nics and
4854 638c6349 Guido Trotter
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
4855 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4856 39a02558 Guido Trotter
          else:
4857 39a02558 Guido Trotter
            val = None
4858 a8083063 Iustin Pop
        elif field == "mac":
4859 39a02558 Guido Trotter
          if instance.nics:
4860 39a02558 Guido Trotter
            val = instance.nics[0].mac
4861 39a02558 Guido Trotter
          else:
4862 39a02558 Guido Trotter
            val = None
4863 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
4864 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
4865 ad24e046 Iustin Pop
          try:
4866 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
4867 ad24e046 Iustin Pop
          except errors.OpPrereqError:
4868 8a23d2d3 Iustin Pop
            val = None
4869 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
4870 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
4871 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
4872 130a6a6f Iustin Pop
        elif field == "tags":
4873 130a6a6f Iustin Pop
          val = list(instance.GetTags())
4874 338e51e8 Iustin Pop
        elif field == "hvparams":
4875 338e51e8 Iustin Pop
          val = i_hv
4876 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
4877 7736a5f2 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS and
4878 7736a5f2 Iustin Pop
              field[len(HVPREFIX):] not in constants.HVC_GLOBALS):
4879 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
4880 338e51e8 Iustin Pop
        elif field == "beparams":
4881 338e51e8 Iustin Pop
          val = i_be
4882 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
4883 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
4884 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
4885 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
4886 71c1af58 Iustin Pop
          # matches a variable list
4887 71c1af58 Iustin Pop
          st_groups = st_match.groups()
4888 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
4889 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4890 71c1af58 Iustin Pop
              val = len(instance.disks)
4891 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
4892 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
4893 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
4894 3e0cea06 Iustin Pop
              try:
4895 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
4896 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
4897 71c1af58 Iustin Pop
                val = None
4898 71c1af58 Iustin Pop
            else:
4899 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
4900 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
4901 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4902 71c1af58 Iustin Pop
              val = len(instance.nics)
4903 41a776da Iustin Pop
            elif st_groups[1] == "macs":
4904 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
4905 41a776da Iustin Pop
            elif st_groups[1] == "ips":
4906 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
4907 638c6349 Guido Trotter
            elif st_groups[1] == "modes":
4908 638c6349 Guido Trotter
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
4909 638c6349 Guido Trotter
            elif st_groups[1] == "links":
4910 638c6349 Guido Trotter
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
4911 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
4912 638c6349 Guido Trotter
              val = []
4913 638c6349 Guido Trotter
              for nicp in i_nicp:
4914 638c6349 Guido Trotter
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
4915 638c6349 Guido Trotter
                  val.append(nicp[constants.NIC_LINK])
4916 638c6349 Guido Trotter
                else:
4917 638c6349 Guido Trotter
                  val.append(None)
4918 71c1af58 Iustin Pop
            else:
4919 71c1af58 Iustin Pop
              # index-based item
4920 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
4921 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
4922 71c1af58 Iustin Pop
                val = None
4923 71c1af58 Iustin Pop
              else:
4924 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
4925 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
4926 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
4927 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
4928 638c6349 Guido Trotter
                elif st_groups[1] == "mode":
4929 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_MODE]
4930 638c6349 Guido Trotter
                elif st_groups[1] == "link":
4931 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_LINK]
4932 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
4933 638c6349 Guido Trotter
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
4934 638c6349 Guido Trotter
                  if nic_mode == constants.NIC_MODE_BRIDGED:
4935 638c6349 Guido Trotter
                    val = i_nicp[nic_idx][constants.NIC_LINK]
4936 638c6349 Guido Trotter
                  else:
4937 638c6349 Guido Trotter
                    val = None
4938 71c1af58 Iustin Pop
                else:
4939 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
4940 71c1af58 Iustin Pop
          else:
4941 c1ce76bb Iustin Pop
            assert False, ("Declared but unhandled variable parameter '%s'" %
4942 c1ce76bb Iustin Pop
                           field)
4943 a8083063 Iustin Pop
        else:
4944 c1ce76bb Iustin Pop
          assert False, "Declared but unhandled parameter '%s'" % field
4945 a8083063 Iustin Pop
        iout.append(val)
4946 a8083063 Iustin Pop
      output.append(iout)
4947 a8083063 Iustin Pop
4948 a8083063 Iustin Pop
    return output
4949 a8083063 Iustin Pop
4950 a8083063 Iustin Pop
4951 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
4952 a8083063 Iustin Pop
  """Failover an instance.
4953 a8083063 Iustin Pop

4954 a8083063 Iustin Pop
  """
4955 a8083063 Iustin Pop
  HPATH = "instance-failover"
4956 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4957 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
4958 c9e5c064 Guido Trotter
  REQ_BGL = False
4959 c9e5c064 Guido Trotter
4960 17c3f802 Guido Trotter
  def CheckArguments(self):
4961 17c3f802 Guido Trotter
    """Check the arguments.
4962 17c3f802 Guido Trotter

4963 17c3f802 Guido Trotter
    """
4964 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
4965 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
4966 17c3f802 Guido Trotter
4967 c9e5c064 Guido Trotter
  def ExpandNames(self):
4968 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
4969 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
4970 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4971 c9e5c064 Guido Trotter
4972 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
4973 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
4974 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
4975 a8083063 Iustin Pop
4976 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4977 a8083063 Iustin Pop
    """Build hooks env.
4978 a8083063 Iustin Pop

4979 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4980 a8083063 Iustin Pop

4981 a8083063 Iustin Pop
    """
4982 08eec276 Iustin Pop
    instance = self.instance
4983 08eec276 Iustin Pop
    source_node = instance.primary_node
4984 08eec276 Iustin Pop
    target_node = instance.secondary_nodes[0]
4985 a8083063 Iustin Pop
    env = {
4986 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
4987 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
4988 08eec276 Iustin Pop
      "OLD_PRIMARY": source_node,
4989 08eec276 Iustin Pop
      "OLD_SECONDARY": target_node,
4990 08eec276 Iustin Pop
      "NEW_PRIMARY": target_node,
4991 08eec276 Iustin Pop
      "NEW_SECONDARY": source_node,
4992 a8083063 Iustin Pop
      }
4993 08eec276 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, instance))
4994 08eec276 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
4995 abd8e836 Iustin Pop
    nl_post = list(nl)
4996 abd8e836 Iustin Pop
    nl_post.append(source_node)
4997 abd8e836 Iustin Pop
    return env, nl, nl_post
4998 a8083063 Iustin Pop
4999 a8083063 Iustin Pop
  def CheckPrereq(self):
5000 a8083063 Iustin Pop
    """Check prerequisites.
5001 a8083063 Iustin Pop

5002 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
5003 a8083063 Iustin Pop

5004 a8083063 Iustin Pop
    """
5005 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5006 c9e5c064 Guido Trotter
    assert self.instance is not None, \
5007 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5008 a8083063 Iustin Pop
5009 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
5010 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
5011 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
5012 5c983ee5 Iustin Pop
                                 " network mirrored, cannot failover.",
5013 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
5014 2a710df1 Michael Hanselmann
5015 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
5016 2a710df1 Michael Hanselmann
    if not secondary_nodes:
5017 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
5018 abdf0113 Iustin Pop
                                   "a mirrored disk template")
5019 2a710df1 Michael Hanselmann
5020 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
5021 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, target_node)
5022 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, target_node)
5023 d27776f0 Iustin Pop
    if instance.admin_up:
5024 d27776f0 Iustin Pop
      # check memory requirements on the secondary node
5025 d27776f0 Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5026 d27776f0 Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
5027 d27776f0 Iustin Pop
                           instance.hypervisor)
5028 d27776f0 Iustin Pop
    else:
5029 d27776f0 Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
5030 d27776f0 Iustin Pop
                   " instance will not be started")
5031 3a7c308e Guido Trotter
5032 a8083063 Iustin Pop
    # check bridge existence
5033 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5034 a8083063 Iustin Pop
5035 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5036 a8083063 Iustin Pop
    """Failover an instance.
5037 a8083063 Iustin Pop

5038 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
5039 a8083063 Iustin Pop
    starting it on the secondary.
5040 a8083063 Iustin Pop

5041 a8083063 Iustin Pop
    """
5042 a8083063 Iustin Pop
    instance = self.instance
5043 a8083063 Iustin Pop
5044 a8083063 Iustin Pop
    source_node = instance.primary_node
5045 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
5046 a8083063 Iustin Pop
5047 1df79ce6 Michael Hanselmann
    if instance.admin_up:
5048 1df79ce6 Michael Hanselmann
      feedback_fn("* checking disk consistency between source and target")
5049 1df79ce6 Michael Hanselmann
      for dev in instance.disks:
5050 1df79ce6 Michael Hanselmann
        # for drbd, these are drbd over lvm
5051 1df79ce6 Michael Hanselmann
        if not _CheckDiskConsistency(self, dev, target_node, False):
5052 1df79ce6 Michael Hanselmann
          if not self.op.ignore_consistency:
5053 1df79ce6 Michael Hanselmann
            raise errors.OpExecError("Disk %s is degraded on target node,"
5054 1df79ce6 Michael Hanselmann
                                     " aborting failover." % dev.iv_name)
5055 1df79ce6 Michael Hanselmann
    else:
5056 1df79ce6 Michael Hanselmann
      feedback_fn("* not checking disk consistency as instance is not running")
5057 a8083063 Iustin Pop
5058 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
5059 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
5060 9a4f63d1 Iustin Pop
                 instance.name, source_node)
5061 a8083063 Iustin Pop
5062 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
5063 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
5064 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5065 1fae010f Iustin Pop
    if msg:
5066 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
5067 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
5068 1fae010f Iustin Pop
                             " Proceeding anyway. Please make sure node"
5069 1fae010f Iustin Pop
                             " %s is down. Error details: %s",
5070 1fae010f Iustin Pop
                             instance.name, source_node, source_node, msg)
5071 24a40d57 Iustin Pop
      else:
5072 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
5073 1fae010f Iustin Pop
                                 " node %s: %s" %
5074 1fae010f Iustin Pop
                                 (instance.name, source_node, msg))
5075 a8083063 Iustin Pop
5076 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
5077 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
5078 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
5079 a8083063 Iustin Pop
5080 a8083063 Iustin Pop
    instance.primary_node = target_node
5081 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
5082 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
5083 a8083063 Iustin Pop
5084 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
5085 0d68c45d Iustin Pop
    if instance.admin_up:
5086 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
5087 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
5088 9a4f63d1 Iustin Pop
                   instance.name, target_node)
5089 12a0cfbe Guido Trotter
5090 7c4d6c7b Michael Hanselmann
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
5091 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
5092 12a0cfbe Guido Trotter
      if not disks_ok:
5093 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5094 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
5095 a8083063 Iustin Pop
5096 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
5097 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
5098 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5099 dd279568 Iustin Pop
      if msg:
5100 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5101 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5102 dd279568 Iustin Pop
                                 (instance.name, target_node, msg))
5103 a8083063 Iustin Pop
5104 a8083063 Iustin Pop
5105 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
5106 53c776b5 Iustin Pop
  """Migrate an instance.
5107 53c776b5 Iustin Pop

5108 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
5109 53c776b5 Iustin Pop
  which is done with shutdown.
5110 53c776b5 Iustin Pop

5111 53c776b5 Iustin Pop
  """
5112 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
5113 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5114 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
5115 53c776b5 Iustin Pop
5116 53c776b5 Iustin Pop
  REQ_BGL = False
5117 53c776b5 Iustin Pop
5118 53c776b5 Iustin Pop
  def ExpandNames(self):
5119 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
5120 3e06e001 Michael Hanselmann
5121 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
5122 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5123 53c776b5 Iustin Pop
5124 3e06e001 Michael Hanselmann
    self._migrater = TLMigrateInstance(self, self.op.instance_name,
5125 3e06e001 Michael Hanselmann
                                       self.op.live, self.op.cleanup)
5126 3a012b41 Michael Hanselmann
    self.tasklets = [self._migrater]
5127 3e06e001 Michael Hanselmann
5128 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
5129 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
5130 53c776b5 Iustin Pop
      self._LockInstancesNodes()
5131 53c776b5 Iustin Pop
5132 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
5133 53c776b5 Iustin Pop
    """Build hooks env.
5134 53c776b5 Iustin Pop

5135 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
5136 53c776b5 Iustin Pop

5137 53c776b5 Iustin Pop
    """
5138 3e06e001 Michael Hanselmann
    instance = self._migrater.instance
5139 08eec276 Iustin Pop
    source_node = instance.primary_node
5140 08eec276 Iustin Pop
    target_node = instance.secondary_nodes[0]
5141 3e06e001 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self, instance)
5142 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
5143 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
5144 08eec276 Iustin Pop
    env.update({
5145 08eec276 Iustin Pop
        "OLD_PRIMARY": source_node,
5146 08eec276 Iustin Pop
        "OLD_SECONDARY": target_node,
5147 08eec276 Iustin Pop
        "NEW_PRIMARY": target_node,
5148 08eec276 Iustin Pop
        "NEW_SECONDARY": source_node,
5149 08eec276 Iustin Pop
        })
5150 3e06e001 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(instance.secondary_nodes)
5151 abd8e836 Iustin Pop
    nl_post = list(nl)
5152 abd8e836 Iustin Pop
    nl_post.append(source_node)
5153 abd8e836 Iustin Pop
    return env, nl, nl_post
5154 53c776b5 Iustin Pop
5155 3e06e001 Michael Hanselmann
5156 313bcead Iustin Pop
class LUMoveInstance(LogicalUnit):
5157 313bcead Iustin Pop
  """Move an instance by data-copying.
5158 313bcead Iustin Pop

5159 313bcead Iustin Pop
  """
5160 313bcead Iustin Pop
  HPATH = "instance-move"
5161 313bcead Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5162 313bcead Iustin Pop
  _OP_REQP = ["instance_name", "target_node"]
5163 313bcead Iustin Pop
  REQ_BGL = False
5164 313bcead Iustin Pop
5165 17c3f802 Guido Trotter
  def CheckArguments(self):
5166 17c3f802 Guido Trotter
    """Check the arguments.
5167 17c3f802 Guido Trotter

5168 17c3f802 Guido Trotter
    """
5169 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
5170 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
5171 17c3f802 Guido Trotter
5172 313bcead Iustin Pop
  def ExpandNames(self):
5173 313bcead Iustin Pop
    self._ExpandAndLockInstance()
5174 cf26a87a Iustin Pop
    target_node = _ExpandNodeName(self.cfg, self.op.target_node)
5175 313bcead Iustin Pop
    self.op.target_node = target_node
5176 313bcead Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
5177 313bcead Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5178 313bcead Iustin Pop
5179 313bcead Iustin Pop
  def DeclareLocks(self, level):
5180 313bcead Iustin Pop
    if level == locking.LEVEL_NODE:
5181 313bcead Iustin Pop
      self._LockInstancesNodes(primary_only=True)
5182 313bcead Iustin Pop
5183 313bcead Iustin Pop
  def BuildHooksEnv(self):
5184 313bcead Iustin Pop
    """Build hooks env.
5185 313bcead Iustin Pop

5186 313bcead Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
5187 313bcead Iustin Pop

5188 313bcead Iustin Pop
    """
5189 313bcead Iustin Pop
    env = {
5190 313bcead Iustin Pop
      "TARGET_NODE": self.op.target_node,
5191 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
5192 313bcead Iustin Pop
      }
5193 313bcead Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5194 313bcead Iustin Pop
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
5195 313bcead Iustin Pop
                                       self.op.target_node]
5196 313bcead Iustin Pop
    return env, nl, nl
5197 313bcead Iustin Pop
5198 313bcead Iustin Pop
  def CheckPrereq(self):
5199 313bcead Iustin Pop
    """Check prerequisites.
5200 313bcead Iustin Pop

5201 313bcead Iustin Pop
    This checks that the instance is in the cluster.
5202 313bcead Iustin Pop

5203 313bcead Iustin Pop
    """
5204 313bcead Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5205 313bcead Iustin Pop
    assert self.instance is not None, \
5206 313bcead Iustin Pop
      "Cannot retrieve locked instance %s" % self.op.instance_name
5207 313bcead Iustin Pop
5208 313bcead Iustin Pop
    node = self.cfg.GetNodeInfo(self.op.target_node)
5209 313bcead Iustin Pop
    assert node is not None, \
5210 313bcead Iustin Pop
      "Cannot retrieve locked node %s" % self.op.target_node
5211 313bcead Iustin Pop
5212 313bcead Iustin Pop
    self.target_node = target_node = node.name
5213 313bcead Iustin Pop
5214 313bcead Iustin Pop
    if target_node == instance.primary_node:
5215 313bcead Iustin Pop
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
5216 5c983ee5 Iustin Pop
                                 (instance.name, target_node),
5217 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
5218 313bcead Iustin Pop
5219 313bcead Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
5220 313bcead Iustin Pop
5221 313bcead Iustin Pop
    for idx, dsk in enumerate(instance.disks):
5222 313bcead Iustin Pop
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
5223 313bcead Iustin Pop
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
5224 d1b83918 Iustin Pop
                                   " cannot copy" % idx, errors.ECODE_STATE)
5225 313bcead Iustin Pop
5226 313bcead Iustin Pop
    _CheckNodeOnline(self, target_node)
5227 313bcead Iustin Pop
    _CheckNodeNotDrained(self, target_node)
5228 313bcead Iustin Pop
5229 313bcead Iustin Pop
    if instance.admin_up:
5230 313bcead Iustin Pop
      # check memory requirements on the secondary node
5231 313bcead Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
5232 313bcead Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
5233 313bcead Iustin Pop
                           instance.hypervisor)
5234 313bcead Iustin Pop
    else:
5235 313bcead Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
5236 313bcead Iustin Pop
                   " instance will not be started")
5237 313bcead Iustin Pop
5238 313bcead Iustin Pop
    # check bridge existence
5239 313bcead Iustin Pop
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5240 313bcead Iustin Pop
5241 313bcead Iustin Pop
  def Exec(self, feedback_fn):
5242 313bcead Iustin Pop
    """Move an instance.
5243 313bcead Iustin Pop

5244 313bcead Iustin Pop
    The move is done by shutting it down on its present node, copying
5245 313bcead Iustin Pop
    the data over (slow) and starting it on the new node.
5246 313bcead Iustin Pop

5247 313bcead Iustin Pop
    """
5248 313bcead Iustin Pop
    instance = self.instance
5249 313bcead Iustin Pop
5250 313bcead Iustin Pop
    source_node = instance.primary_node
5251 313bcead Iustin Pop
    target_node = self.target_node
5252 313bcead Iustin Pop
5253 313bcead Iustin Pop
    self.LogInfo("Shutting down instance %s on source node %s",
5254 313bcead Iustin Pop
                 instance.name, source_node)
5255 313bcead Iustin Pop
5256 17c3f802 Guido Trotter
    result = self.rpc.call_instance_shutdown(source_node, instance,
5257 17c3f802 Guido Trotter
                                             self.shutdown_timeout)
5258 313bcead Iustin Pop
    msg = result.fail_msg
5259 313bcead Iustin Pop
    if msg:
5260 313bcead Iustin Pop
      if self.op.ignore_consistency:
5261 313bcead Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
5262 313bcead Iustin Pop
                             " Proceeding anyway. Please make sure node"
5263 313bcead Iustin Pop
                             " %s is down. Error details: %s",
5264 313bcead Iustin Pop
                             instance.name, source_node, source_node, msg)
5265 313bcead Iustin Pop
      else:
5266 313bcead Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
5267 313bcead Iustin Pop
                                 " node %s: %s" %
5268 313bcead Iustin Pop
                                 (instance.name, source_node, msg))
5269 313bcead Iustin Pop
5270 313bcead Iustin Pop
    # create the target disks
5271 313bcead Iustin Pop
    try:
5272 313bcead Iustin Pop
      _CreateDisks(self, instance, target_node=target_node)
5273 313bcead Iustin Pop
    except errors.OpExecError:
5274 313bcead Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
5275 313bcead Iustin Pop
      try:
5276 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
5277 313bcead Iustin Pop
      finally:
5278 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5279 313bcead Iustin Pop
        raise
5280 313bcead Iustin Pop
5281 313bcead Iustin Pop
    cluster_name = self.cfg.GetClusterInfo().cluster_name
5282 313bcead Iustin Pop
5283 313bcead Iustin Pop
    errs = []
5284 313bcead Iustin Pop
    # activate, get path, copy the data over
5285 313bcead Iustin Pop
    for idx, disk in enumerate(instance.disks):
5286 313bcead Iustin Pop
      self.LogInfo("Copying data for disk %d", idx)
5287 313bcead Iustin Pop
      result = self.rpc.call_blockdev_assemble(target_node, disk,
5288 313bcead Iustin Pop
                                               instance.name, True)
5289 313bcead Iustin Pop
      if result.fail_msg:
5290 313bcead Iustin Pop
        self.LogWarning("Can't assemble newly created disk %d: %s",
5291 313bcead Iustin Pop
                        idx, result.fail_msg)
5292 313bcead Iustin Pop
        errs.append(result.fail_msg)
5293 313bcead Iustin Pop
        break
5294 313bcead Iustin Pop
      dev_path = result.payload
5295 313bcead Iustin Pop
      result = self.rpc.call_blockdev_export(source_node, disk,
5296 313bcead Iustin Pop
                                             target_node, dev_path,
5297 313bcead Iustin Pop
                                             cluster_name)
5298 313bcead Iustin Pop
      if result.fail_msg:
5299 313bcead Iustin Pop
        self.LogWarning("Can't copy data over for disk %d: %s",
5300 313bcead Iustin Pop
                        idx, result.fail_msg)
5301 313bcead Iustin Pop
        errs.append(result.fail_msg)
5302 313bcead Iustin Pop
        break
5303 313bcead Iustin Pop
5304 313bcead Iustin Pop
    if errs:
5305 313bcead Iustin Pop
      self.LogWarning("Some disks failed to copy, aborting")
5306 313bcead Iustin Pop
      try:
5307 313bcead Iustin Pop
        _RemoveDisks(self, instance, target_node=target_node)
5308 313bcead Iustin Pop
      finally:
5309 313bcead Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5310 313bcead Iustin Pop
        raise errors.OpExecError("Errors during disk copy: %s" %
5311 313bcead Iustin Pop
                                 (",".join(errs),))
5312 313bcead Iustin Pop
5313 313bcead Iustin Pop
    instance.primary_node = target_node
5314 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
5315 313bcead Iustin Pop
5316 313bcead Iustin Pop
    self.LogInfo("Removing the disks on the original node")
5317 313bcead Iustin Pop
    _RemoveDisks(self, instance, target_node=source_node)
5318 313bcead Iustin Pop
5319 313bcead Iustin Pop
    # Only start the instance if it's marked as up
5320 313bcead Iustin Pop
    if instance.admin_up:
5321 313bcead Iustin Pop
      self.LogInfo("Starting instance %s on node %s",
5322 313bcead Iustin Pop
                   instance.name, target_node)
5323 313bcead Iustin Pop
5324 313bcead Iustin Pop
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
5325 313bcead Iustin Pop
                                           ignore_secondaries=True)
5326 313bcead Iustin Pop
      if not disks_ok:
5327 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5328 313bcead Iustin Pop
        raise errors.OpExecError("Can't activate the instance's disks")
5329 313bcead Iustin Pop
5330 313bcead Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
5331 313bcead Iustin Pop
      msg = result.fail_msg
5332 313bcead Iustin Pop
      if msg:
5333 313bcead Iustin Pop
        _ShutdownInstanceDisks(self, instance)
5334 313bcead Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
5335 313bcead Iustin Pop
                                 (instance.name, target_node, msg))
5336 313bcead Iustin Pop
5337 313bcead Iustin Pop
5338 80cb875c Michael Hanselmann
class LUMigrateNode(LogicalUnit):
5339 80cb875c Michael Hanselmann
  """Migrate all instances from a node.
5340 80cb875c Michael Hanselmann

5341 80cb875c Michael Hanselmann
  """
5342 80cb875c Michael Hanselmann
  HPATH = "node-migrate"
5343 80cb875c Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
5344 80cb875c Michael Hanselmann
  _OP_REQP = ["node_name", "live"]
5345 80cb875c Michael Hanselmann
  REQ_BGL = False
5346 80cb875c Michael Hanselmann
5347 80cb875c Michael Hanselmann
  def ExpandNames(self):
5348 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
5349 80cb875c Michael Hanselmann
5350 80cb875c Michael Hanselmann
    self.needed_locks = {
5351 80cb875c Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
5352 80cb875c Michael Hanselmann
      }
5353 80cb875c Michael Hanselmann
5354 80cb875c Michael Hanselmann
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5355 80cb875c Michael Hanselmann
5356 80cb875c Michael Hanselmann
    # Create tasklets for migrating all instances on this node
5357 80cb875c Michael Hanselmann
    names = []
5358 80cb875c Michael Hanselmann
    tasklets = []
5359 80cb875c Michael Hanselmann
5360 80cb875c Michael Hanselmann
    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
5361 80cb875c Michael Hanselmann
      logging.debug("Migrating instance %s", inst.name)
5362 80cb875c Michael Hanselmann
      names.append(inst.name)
5363 80cb875c Michael Hanselmann
5364 80cb875c Michael Hanselmann
      tasklets.append(TLMigrateInstance(self, inst.name, self.op.live, False))
5365 80cb875c Michael Hanselmann
5366 80cb875c Michael Hanselmann
    self.tasklets = tasklets
5367 80cb875c Michael Hanselmann
5368 80cb875c Michael Hanselmann
    # Declare instance locks
5369 80cb875c Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = names
5370 80cb875c Michael Hanselmann
5371 80cb875c Michael Hanselmann
  def DeclareLocks(self, level):
5372 80cb875c Michael Hanselmann
    if level == locking.LEVEL_NODE:
5373 80cb875c Michael Hanselmann
      self._LockInstancesNodes()
5374 80cb875c Michael Hanselmann
5375 80cb875c Michael Hanselmann
  def BuildHooksEnv(self):
5376 80cb875c Michael Hanselmann
    """Build hooks env.
5377 80cb875c Michael Hanselmann

5378 80cb875c Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
5379 80cb875c Michael Hanselmann

5380 80cb875c Michael Hanselmann
    """
5381 80cb875c Michael Hanselmann
    env = {
5382 80cb875c Michael Hanselmann
      "NODE_NAME": self.op.node_name,
5383 80cb875c Michael Hanselmann
      }
5384 80cb875c Michael Hanselmann
5385 80cb875c Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
5386 80cb875c Michael Hanselmann
5387 80cb875c Michael Hanselmann
    return (env, nl, nl)
5388 80cb875c Michael Hanselmann
5389 80cb875c Michael Hanselmann
5390 3e06e001 Michael Hanselmann
class TLMigrateInstance(Tasklet):
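  """Tasklet class for instance migration.

  This also covers the cleanup of a previously failed migration (see the
  cleanup constructor parameter and _ExecCleanup).

  """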
5391 3e06e001 Michael Hanselmann
  def __init__(self, lu, instance_name, live, cleanup):
5392 3e06e001 Michael Hanselmann
    """Initializes this class.
5393 3e06e001 Michael Hanselmann

5394 3e06e001 Michael Hanselmann
    """
5395 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
5396 464243a7 Michael Hanselmann
5397 3e06e001 Michael Hanselmann
    # Parameters
5398 3e06e001 Michael Hanselmann
    self.instance_name = instance_name
5399 3e06e001 Michael Hanselmann
    self.live = live
5400 3e06e001 Michael Hanselmann
    self.cleanup = cleanup
5401 3e06e001 Michael Hanselmann
5402 53c776b5 Iustin Pop
  def CheckPrereq(self):
5403 53c776b5 Iustin Pop
    """Check prerequisites.
5404 53c776b5 Iustin Pop

5405 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
5406 53c776b5 Iustin Pop

5407 53c776b5 Iustin Pop
    """
5408 cf26a87a Iustin Pop
    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
5409 cf26a87a Iustin Pop
    instance = self.cfg.GetInstanceInfo(instance_name)
5410 cf26a87a Iustin Pop
    assert instance is not None
5411 53c776b5 Iustin Pop
5412 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
5413 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
5414 5c983ee5 Iustin Pop
                                 " drbd8, cannot migrate.", errors.ECODE_STATE)
5415 53c776b5 Iustin Pop
5416 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
5417 53c776b5 Iustin Pop
    if not secondary_nodes:
5418 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
5419 733a2b6a Iustin Pop
                                      " drbd8 disk template")
5420 53c776b5 Iustin Pop
5421 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
5422 53c776b5 Iustin Pop
5423 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
5424 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
5425 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
5426 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
5427 53c776b5 Iustin Pop
                         instance.hypervisor)
5428 53c776b5 Iustin Pop
5429 53c776b5 Iustin Pop
    # check bridge existence
5430 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
5431 53c776b5 Iustin Pop
5432 3e06e001 Michael Hanselmann
    if not self.cleanup:
5433 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
5434 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
5435 53c776b5 Iustin Pop
                                                 instance)
5436 045dd6d9 Iustin Pop
      result.Raise("Can't migrate, please use failover",
5437 045dd6d9 Iustin Pop
                   prereq=True, ecode=errors.ECODE_STATE)
5438 53c776b5 Iustin Pop
5439 53c776b5 Iustin Pop
    self.instance = instance
5440 53c776b5 Iustin Pop
5441 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
5442 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
5443 53c776b5 Iustin Pop

5444 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
5445 53c776b5 Iustin Pop

5446 53c776b5 Iustin Pop
    """
5447 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
5448 53c776b5 Iustin Pop
    all_done = False
5449 53c776b5 Iustin Pop
    while not all_done:
5450 53c776b5 Iustin Pop
      all_done = True
5451 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
5452 53c776b5 Iustin Pop
                                            self.nodes_ip,
5453 53c776b5 Iustin Pop
                                            self.instance.disks)
5454 53c776b5 Iustin Pop
      min_percent = 100
5455 53c776b5 Iustin Pop
      for node, nres in result.items():
5456 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
5457 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
5458 53c776b5 Iustin Pop
        all_done = all_done and node_done
5459 53c776b5 Iustin Pop
        if node_percent is not None:
5460 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
5461 53c776b5 Iustin Pop
      if not all_done:
5462 53c776b5 Iustin Pop
        if min_percent < 100:
5463 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
5464 53c776b5 Iustin Pop
        time.sleep(2)
5465 53c776b5 Iustin Pop
5466 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
5467 53c776b5 Iustin Pop
    """Demote a node to secondary.
5468 53c776b5 Iustin Pop

5469 53c776b5 Iustin Pop
    """
5470 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
5471 53c776b5 Iustin Pop
5472 53c776b5 Iustin Pop
    for dev in self.instance.disks:
5473 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
5474 53c776b5 Iustin Pop
5475 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
5476 53c776b5 Iustin Pop
                                          self.instance.disks)
5477 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
5478 53c776b5 Iustin Pop
5479 53c776b5 Iustin Pop
  def _GoStandalone(self):
5480 53c776b5 Iustin Pop
    """Disconnect from the network.
5481 53c776b5 Iustin Pop

5482 53c776b5 Iustin Pop
    """
5483 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
5484 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
5485 53c776b5 Iustin Pop
                                               self.instance.disks)
5486 53c776b5 Iustin Pop
    for node, nres in result.items():
5487 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
5488 53c776b5 Iustin Pop
5489 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
5490 53c776b5 Iustin Pop
    """Reconnect to the network.
5491 53c776b5 Iustin Pop

5492 53c776b5 Iustin Pop
    """
5493 53c776b5 Iustin Pop
    if multimaster:
5494 53c776b5 Iustin Pop
      msg = "dual-master"
5495 53c776b5 Iustin Pop
    else:
5496 53c776b5 Iustin Pop
      msg = "single-master"
5497 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
5498 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
5499 53c776b5 Iustin Pop
                                           self.instance.disks,
5500 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
5501 53c776b5 Iustin Pop
    for node, nres in result.items():
5502 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
5503 53c776b5 Iustin Pop
5504 53c776b5 Iustin Pop
  def _ExecCleanup(self):
5505 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
5506 53c776b5 Iustin Pop

5507 53c776b5 Iustin Pop
    The cleanup is done by:
5508 53c776b5 Iustin Pop
      - check that the instance is running only on one node
5509 53c776b5 Iustin Pop
        (and update the config if needed)
5510 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
5511 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
5512 53c776b5 Iustin Pop
      - disconnect from the network
5513 53c776b5 Iustin Pop
      - change disks into single-master mode
5514 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
5515 53c776b5 Iustin Pop

5516 53c776b5 Iustin Pop
    """
5517 53c776b5 Iustin Pop
    instance = self.instance
5518 53c776b5 Iustin Pop
    target_node = self.target_node
5519 53c776b5 Iustin Pop
    source_node = self.source_node
5520 53c776b5 Iustin Pop
5521 53c776b5 Iustin Pop
    # check running on only one node
5522 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
5523 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
5524 53c776b5 Iustin Pop
                     " a bad state)")
5525 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
5526 53c776b5 Iustin Pop
    for node, result in ins_l.items():
5527 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
5528 53c776b5 Iustin Pop
5529 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
5530 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
5531 53c776b5 Iustin Pop
5532 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
5533 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
5534 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
5535 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
5536 53c776b5 Iustin Pop
                               " and restart this operation.")
5537 53c776b5 Iustin Pop
5538 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
5539 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
5540 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
5541 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
5542 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
5543 53c776b5 Iustin Pop
5544 53c776b5 Iustin Pop
    if runningon_target:
5545 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
5546 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
5547 53c776b5 Iustin Pop
                       " updating config" % target_node)
5548 53c776b5 Iustin Pop
      instance.primary_node = target_node
5549 a4eae71f Michael Hanselmann
      self.cfg.Update(instance, self.feedback_fn)
5550 53c776b5 Iustin Pop
      demoted_node = source_node
5551 53c776b5 Iustin Pop
    else:
5552 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
5553 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
5554 53c776b5 Iustin Pop
      demoted_node = target_node
5555 53c776b5 Iustin Pop
5556 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
5557 53c776b5 Iustin Pop
    try:
5558 53c776b5 Iustin Pop
      self._WaitUntilSync()
5559 53c776b5 Iustin Pop
    except errors.OpExecError:
5560 53c776b5 Iustin Pop
      # we ignore errors here, since if the device is standalone, it
5561 53c776b5 Iustin Pop
      # won't be able to sync
5562 53c776b5 Iustin Pop
      pass
5563 53c776b5 Iustin Pop
    self._GoStandalone()
5564 53c776b5 Iustin Pop
    self._GoReconnect(False)
5565 53c776b5 Iustin Pop
    self._WaitUntilSync()
5566 53c776b5 Iustin Pop
5567 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5568 53c776b5 Iustin Pop
5569 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
5570 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
5571 6906a9d8 Guido Trotter

5572 6906a9d8 Guido Trotter
    """
5573 6906a9d8 Guido Trotter
    target_node = self.target_node
5574 6906a9d8 Guido Trotter
    try:
5575 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
5576 6906a9d8 Guido Trotter
      self._GoStandalone()
5577 6906a9d8 Guido Trotter
      self._GoReconnect(False)
5578 6906a9d8 Guido Trotter
      self._WaitUntilSync()
5579 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
5580 3e06e001 Michael Hanselmann
      self.lu.LogWarning("Migration failed and I can't reconnect the"
5581 3e06e001 Michael Hanselmann
                         " drives: error '%s'\n"
5582 3e06e001 Michael Hanselmann
                         "Please look and recover the instance status" %
5583 3e06e001 Michael Hanselmann
                         str(err))
5584 6906a9d8 Guido Trotter
5585 6906a9d8 Guido Trotter
  def _AbortMigration(self):
5586 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
5587 6906a9d8 Guido Trotter

5588 6906a9d8 Guido Trotter
    """
5589 6906a9d8 Guido Trotter
    instance = self.instance
5590 6906a9d8 Guido Trotter
    target_node = self.target_node
5591 6906a9d8 Guido Trotter
    migration_info = self.migration_info
5592 6906a9d8 Guido Trotter
5593 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
5594 6906a9d8 Guido Trotter
                                                    instance,
5595 6906a9d8 Guido Trotter
                                                    migration_info,
5596 6906a9d8 Guido Trotter
                                                    False)
5597 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
5598 6906a9d8 Guido Trotter
    if abort_msg:
5599 099c52ad Iustin Pop
      logging.error("Aborting migration failed on target node %s: %s",
5600 099c52ad Iustin Pop
                    target_node, abort_msg)
5601 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we still have to try to revert the
5602 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
5603 6906a9d8 Guido Trotter
5604 53c776b5 Iustin Pop
  def _ExecMigration(self):
5605 53c776b5 Iustin Pop
    """Migrate an instance.
5606 53c776b5 Iustin Pop

5607 53c776b5 Iustin Pop
    The migrate is done by:
5608 53c776b5 Iustin Pop
      - change the disks into dual-master mode
5609 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
5610 53c776b5 Iustin Pop
      - migrate the instance
5611 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
5612 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
5613 53c776b5 Iustin Pop
      - change disks into single-master mode
5614 53c776b5 Iustin Pop

5615 53c776b5 Iustin Pop
    """
5616 53c776b5 Iustin Pop
    instance = self.instance
5617 53c776b5 Iustin Pop
    target_node = self.target_node
5618 53c776b5 Iustin Pop
    source_node = self.source_node
5619 53c776b5 Iustin Pop
5620 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
5621 53c776b5 Iustin Pop
    for dev in instance.disks:
5622 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
5623 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
5624 53c776b5 Iustin Pop
                                 " synchronized on target node,"
5625 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
5626 53c776b5 Iustin Pop
5627 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
5628 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
5629 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5630 6906a9d8 Guido Trotter
    if msg:
5631 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
5632 0959c824 Iustin Pop
                 (source_node, msg))
5633 6906a9d8 Guido Trotter
      logging.error(log_err)
5634 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
5635 6906a9d8 Guido Trotter
5636 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
5637 6906a9d8 Guido Trotter
5638 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
5639 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
5640 53c776b5 Iustin Pop
    self._GoStandalone()
5641 53c776b5 Iustin Pop
    self._GoReconnect(True)
5642 53c776b5 Iustin Pop
    self._WaitUntilSync()
5643 53c776b5 Iustin Pop
5644 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
5645 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
5646 6906a9d8 Guido Trotter
                                           instance,
5647 6906a9d8 Guido Trotter
                                           migration_info,
5648 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
5649 6906a9d8 Guido Trotter
5650 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5651 6906a9d8 Guido Trotter
    if msg:
5652 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
5653 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
5654 78212a5d Iustin Pop
      self.feedback_fn("Pre-migration failed, aborting")
5655 6906a9d8 Guido Trotter
      self._AbortMigration()
5656 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5657 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
5658 6906a9d8 Guido Trotter
                               (instance.name, msg))
5659 6906a9d8 Guido Trotter
5660 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
5661 53c776b5 Iustin Pop
    time.sleep(10)
5662 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
5663 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
5664 3e06e001 Michael Hanselmann
                                            self.live)
5665 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5666 53c776b5 Iustin Pop
    if msg:
5667 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
5668 53c776b5 Iustin Pop
                    " disk status: %s", msg)
5669 78212a5d Iustin Pop
      self.feedback_fn("Migration failed, aborting")
5670 6906a9d8 Guido Trotter
      self._AbortMigration()
5671 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
5672 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
5673 53c776b5 Iustin Pop
                               (instance.name, msg))
5674 53c776b5 Iustin Pop
    time.sleep(10)
5675 53c776b5 Iustin Pop
5676 53c776b5 Iustin Pop
    instance.primary_node = target_node
5677 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
5678 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, self.feedback_fn)
5679 53c776b5 Iustin Pop
5680 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
5681 6906a9d8 Guido Trotter
                                              instance,
5682 6906a9d8 Guido Trotter
                                              migration_info,
5683 6906a9d8 Guido Trotter
                                              True)
5684 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5685 6906a9d8 Guido Trotter
    if msg:
5686 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
5687 099c52ad Iustin Pop
                    " %s", msg)
5688 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
5689 6906a9d8 Guido Trotter
                               msg)
5690 6906a9d8 Guido Trotter
5691 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
5692 53c776b5 Iustin Pop
    self._WaitUntilSync()
5693 53c776b5 Iustin Pop
    self._GoStandalone()
5694 53c776b5 Iustin Pop
    self._GoReconnect(False)
5695 53c776b5 Iustin Pop
    self._WaitUntilSync()
5696 53c776b5 Iustin Pop
5697 53c776b5 Iustin Pop
    self.feedback_fn("* done")
5698 53c776b5 Iustin Pop
5699 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
5700 53c776b5 Iustin Pop
    """Perform the migration.
5701 53c776b5 Iustin Pop

5702 53c776b5 Iustin Pop
    """
5703 80cb875c Michael Hanselmann
    feedback_fn("Migrating instance %s" % self.instance.name)
5704 80cb875c Michael Hanselmann
5705 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
5706 53c776b5 Iustin Pop
5707 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
5708 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
5709 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
5710 53c776b5 Iustin Pop
    self.nodes_ip = {
5711 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
5712 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
5713 53c776b5 Iustin Pop
      }
5714 3e06e001 Michael Hanselmann
5715 3e06e001 Michael Hanselmann
    if self.cleanup:
5716 53c776b5 Iustin Pop
      return self._ExecCleanup()
5717 53c776b5 Iustin Pop
    else:
5718 53c776b5 Iustin Pop
      return self._ExecMigration()
5719 53c776b5 Iustin Pop
5720 53c776b5 Iustin Pop
5721 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
5722 428958aa Iustin Pop
                    info, force_open):
5723 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
5724 a8083063 Iustin Pop

5725 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
5726 a8083063 Iustin Pop
  all its children.
5727 a8083063 Iustin Pop

5728 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
5729 a8083063 Iustin Pop

5730 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
5731 428958aa Iustin Pop
  @param node: the node on which to create the device
5732 428958aa Iustin Pop
  @type instance: L{objects.Instance}
5733 428958aa Iustin Pop
  @param instance: the instance which owns the device
5734 428958aa Iustin Pop
  @type device: L{objects.Disk}
5735 428958aa Iustin Pop
  @param device: the device to create
5736 428958aa Iustin Pop
  @type force_create: boolean
5737 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
5738 428958aa Iustin Pop
      will be changed to True whenever we find a device which has the
5739 428958aa Iustin Pop
      CreateOnSecondary() attribute
5740 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5741 428958aa Iustin Pop
      (this will be represented as an LVM tag)
5742 428958aa Iustin Pop
  @type force_open: boolean
5743 428958aa Iustin Pop
  @param force_open: this parameter will be passed to the
5744 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5745 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
5746 428958aa Iustin Pop
      the child assembly and the device's own Open() execution
5747 428958aa Iustin Pop

5748 a8083063 Iustin Pop
  """
5749 a8083063 Iustin Pop
  if device.CreateOnSecondary():
5750 428958aa Iustin Pop
    force_create = True
5751 796cab27 Iustin Pop
5752 a8083063 Iustin Pop
  if device.children:
5753 a8083063 Iustin Pop
    for child in device.children:
5754 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
5755 428958aa Iustin Pop
                      info, force_open)
5756 a8083063 Iustin Pop
5757 428958aa Iustin Pop
  if not force_create:
5758 796cab27 Iustin Pop
    return
5759 796cab27 Iustin Pop
5760 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
5761 de12473a Iustin Pop
5762 de12473a Iustin Pop
5763 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
5764 de12473a Iustin Pop
  """Create a single block device on a given node.
5765 de12473a Iustin Pop

5766 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
5767 de12473a Iustin Pop
  created in advance.
5768 de12473a Iustin Pop

5769 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
5770 de12473a Iustin Pop
  @param node: the node on which to create the device
5771 de12473a Iustin Pop
  @type instance: L{objects.Instance}
5772 de12473a Iustin Pop
  @param instance: the instance which owns the device
5773 de12473a Iustin Pop
  @type device: L{objects.Disk}
5774 de12473a Iustin Pop
  @param device: the device to create
5775 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5776 de12473a Iustin Pop
      (this will be represented as an LVM tag)
5777 de12473a Iustin Pop
  @type force_open: boolean
5778 de12473a Iustin Pop
  @param force_open: this parameter will be passed to the
5779 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5780 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
5781 de12473a Iustin Pop
      the child assembly and the device's own Open() execution
5782 de12473a Iustin Pop

5783 de12473a Iustin Pop
  """
5784 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
5785 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
5786 428958aa Iustin Pop
                                       instance.name, force_open, info)
5787 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
5788 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
5789 a8083063 Iustin Pop
  if device.physical_id is None:
5790 0959c824 Iustin Pop
    device.physical_id = result.payload
5791 a8083063 Iustin Pop
5792 a8083063 Iustin Pop
5793 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
5794 923b1523 Iustin Pop
  """Generate a suitable LV name.
5795 923b1523 Iustin Pop

5796 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
5797 923b1523 Iustin Pop

5798 923b1523 Iustin Pop
  """
5799 923b1523 Iustin Pop
  results = []
5800 923b1523 Iustin Pop
  for val in exts:
5801 4fae38c5 Guido Trotter
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
5802 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
5803 923b1523 Iustin Pop
  return results
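# For illustration (hypothetical values): _GenerateUniqueNames(lu,
# [".disk0", ".disk1"]) would return something like
#   ["0b5e7a2c-...disk0", "9f1d4c88-...disk1"]
# i.e. a freshly generated cluster-unique ID per extension; these later
# become the logical volume names backing the instance's disks.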
5804 923b1523 Iustin Pop
5805 923b1523 Iustin Pop
5806 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
5807 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
5808 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
5809 a1f445d3 Iustin Pop

5810 a1f445d3 Iustin Pop
  """
5811 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
5812 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5813 afa1386e Guido Trotter
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
5814 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5815 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
5816 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5817 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
5818 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
5819 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
5820 f9518d38 Iustin Pop
                                      p_minor, s_minor,
5821 f9518d38 Iustin Pop
                                      shared_secret),
5822 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
5823 a1f445d3 Iustin Pop
                          iv_name=iv_name)
5824 a1f445d3 Iustin Pop
  return drbd_dev
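# The object tree built above, roughly (sizes in MiB; 'names' as produced by
# _GenerateUniqueNames, port/minors/secret from the config allocators):
#
#   Disk(LD_DRBD8, size=size,
#        logical_id=(primary, secondary, port, p_minor, s_minor, secret),
#        children=[Disk(LD_LV, size=size, logical_id=(vgname, names[0])),
#                  Disk(LD_LV, size=128, logical_id=(vgname, names[1]))])
#
# where the first child holds the data and the second the DRBD metadata.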
5825 a1f445d3 Iustin Pop
5826 7c0d6283 Michael Hanselmann
5827 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
5828 a8083063 Iustin Pop
                          instance_name, primary_node,
5829 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
5830 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
5831 e2a65344 Iustin Pop
                          base_index):
5832 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
5833 a8083063 Iustin Pop

5834 a8083063 Iustin Pop
  """
5835 a8083063 Iustin Pop
  #TODO: compute space requirements
5836 a8083063 Iustin Pop
5837 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
5838 08db7c5c Iustin Pop
  disk_count = len(disk_info)
5839 08db7c5c Iustin Pop
  disks = []
5840 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
5841 08db7c5c Iustin Pop
    pass
5842 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
5843 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
5844 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5845 923b1523 Iustin Pop
5846 fb4b324b Guido Trotter
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5847 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
5848 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5849 e2a65344 Iustin Pop
      disk_index = idx + base_index
5850 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
5851 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
5852 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
5853 6ec66eae Iustin Pop
                              mode=disk["mode"])
5854 08db7c5c Iustin Pop
      disks.append(disk_dev)
5855 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
5856 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
5857 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
5858 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
5859 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
5860 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
5861 08db7c5c Iustin Pop
5862 e6c1ff2f Iustin Pop
    names = []
5863 fb4b324b Guido Trotter
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
5864 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
5865 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
5866 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
5867 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5868 112050d9 Iustin Pop
      disk_index = idx + base_index
5869 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
5870 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
5871 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
5872 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
5873 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
5874 08db7c5c Iustin Pop
      disks.append(disk_dev)
5875 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
5876 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
5877 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
5878 0f1a06e3 Manuel Franceschini
5879 0e3baaf3 Iustin Pop
    _RequireFileStorage()
5880 0e3baaf3 Iustin Pop
5881 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
5882 112050d9 Iustin Pop
      disk_index = idx + base_index
5883 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
5884 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
5885 08db7c5c Iustin Pop
                              logical_id=(file_driver,
5886 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
5887 43e99cff Guido Trotter
                                                         disk_index)),
5888 6ec66eae Iustin Pop
                              mode=disk["mode"])
5889 08db7c5c Iustin Pop
      disks.append(disk_dev)
5890 a8083063 Iustin Pop
  else:
5891 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
5892 a8083063 Iustin Pop
  return disks
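# Example call with hypothetical arguments, roughly as used during instance
# creation (for DT_DRBD8 the file_storage_dir/file_driver values are unused):
#
#   disks = _GenerateDiskTemplate(lu, constants.DT_DRBD8,
#                                 "inst1.example.com", pnode, [snode],
#                                 [{"size": 1024, "mode": constants.DISK_RDWR}],
#                                 None, None, 0)
#
# which would yield a single DRBD8 disk named "disk/0", backed by the
# "<uuid>.disk0_data" and "<uuid>.disk0_meta" logical volumes on both nodes.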
5893 a8083063 Iustin Pop
5894 a8083063 Iustin Pop
5895 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
5896 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
5897 3ecf6786 Iustin Pop

5898 3ecf6786 Iustin Pop
  """
5899 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
5900 a0c3fea1 Michael Hanselmann
5901 a0c3fea1 Michael Hanselmann
5902 621b7678 Iustin Pop
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
5903 a8083063 Iustin Pop
  """Create all disks for an instance.
5904 a8083063 Iustin Pop

5905 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
5906 a8083063 Iustin Pop

5907 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5908 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5909 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5910 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
5911 bd315bfa Iustin Pop
  @type to_skip: list
5912 bd315bfa Iustin Pop
  @param to_skip: list of indices to skip
5913 621b7678 Iustin Pop
  @type target_node: string
5914 621b7678 Iustin Pop
  @param target_node: if passed, overrides the target node for creation
5915 e4376078 Iustin Pop
  @raise errors.OpExecError: if any of the block devices cannot be created
5916 e4376078 Iustin Pop
5917 a8083063 Iustin Pop

5918 a8083063 Iustin Pop
  """
5919 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
5920 621b7678 Iustin Pop
  if target_node is None:
5921 621b7678 Iustin Pop
    pnode = instance.primary_node
5922 621b7678 Iustin Pop
    all_nodes = instance.all_nodes
5923 621b7678 Iustin Pop
  else:
5924 621b7678 Iustin Pop
    pnode = target_node
5925 621b7678 Iustin Pop
    all_nodes = [pnode]
5926 a0c3fea1 Michael Hanselmann
5927 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5928 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5929 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
5930 0f1a06e3 Manuel Franceschini
5931 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
5932 9b4127eb Guido Trotter
                 " node %s" % (file_storage_dir, pnode))
5933 0f1a06e3 Manuel Franceschini
5934 24991749 Iustin Pop
  # Note: this needs to be kept in sync with the disk addition in
5935 24991749 Iustin Pop
  # LUSetInstanceParams
5936 bd315bfa Iustin Pop
  for idx, device in enumerate(instance.disks):
5937 bd315bfa Iustin Pop
    if to_skip and idx in to_skip:
5938 bd315bfa Iustin Pop
      continue
5939 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
5940 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
5941 a8083063 Iustin Pop
    #HARDCODE
5942 621b7678 Iustin Pop
    for node in all_nodes:
5943 428958aa Iustin Pop
      f_create = node == pnode
5944 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
5945 a8083063 Iustin Pop
5946 a8083063 Iustin Pop
5947 621b7678 Iustin Pop
def _RemoveDisks(lu, instance, target_node=None):
5948 a8083063 Iustin Pop
  """Remove all disks for an instance.
5949 a8083063 Iustin Pop

5950 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
5951 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
5952 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
5953 a8083063 Iustin Pop
  with `_CreateDisks()`).
5954 a8083063 Iustin Pop

5955 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
5956 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
5957 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
5958 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
5959 621b7678 Iustin Pop
  @type target_node: string
5960 621b7678 Iustin Pop
  @param target_node: used to override the node on which to remove the disks
5961 e4376078 Iustin Pop
  @rtype: boolean
5962 e4376078 Iustin Pop
  @return: the success of the removal
5963 a8083063 Iustin Pop

5964 a8083063 Iustin Pop
  """
5965 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
5966 a8083063 Iustin Pop
5967 e1bc0878 Iustin Pop
  all_result = True
5968 a8083063 Iustin Pop
  for device in instance.disks:
5969 621b7678 Iustin Pop
    if target_node:
5970 621b7678 Iustin Pop
      edata = [(target_node, device)]
5971 621b7678 Iustin Pop
    else:
5972 621b7678 Iustin Pop
      edata = device.ComputeNodeTree(instance.primary_node)
5973 621b7678 Iustin Pop
    for node, disk in edata:
5974 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
5975 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
5976 e1bc0878 Iustin Pop
      if msg:
5977 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
5978 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
5979 e1bc0878 Iustin Pop
        all_result = False
5980 0f1a06e3 Manuel Franceschini
5981 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
5982 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
5983 dfc2a24c Guido Trotter
    if target_node:
5984 dfc2a24c Guido Trotter
      tgt = target_node
5985 621b7678 Iustin Pop
    else:
5986 dfc2a24c Guido Trotter
      tgt = instance.primary_node
5987 621b7678 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
5988 621b7678 Iustin Pop
    if result.fail_msg:
5989 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
5990 621b7678 Iustin Pop
                    file_storage_dir, instance.primary_node, result.fail_msg)
5991 e1bc0878 Iustin Pop
      all_result = False
5992 0f1a06e3 Manuel Franceschini
5993 e1bc0878 Iustin Pop
  return all_result
5994 a8083063 Iustin Pop
5995 a8083063 Iustin Pop
5996 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
5997 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
5998 e2fe6369 Iustin Pop

5999 e2fe6369 Iustin Pop
  """
6000 e2fe6369 Iustin Pop
  # Required free disk space as a function of the disk template and disk sizes
6001 e2fe6369 Iustin Pop
  req_size_dict = {
6002 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
6003 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
6004 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
6005 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
6006 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
6007 e2fe6369 Iustin Pop
  }
6008 e2fe6369 Iustin Pop
6009 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
6010 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
6011 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
6012 e2fe6369 Iustin Pop
6013 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
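# Worked example with hypothetical sizes: for two disks of 1024 and 512 MiB,
# DT_DRBD8 requires (1024 + 128) + (512 + 128) = 1792 MiB in the volume
# group, while DT_PLAIN requires only 1024 + 512 = 1536 MiB.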
6014 e2fe6369 Iustin Pop
6015 e2fe6369 Iustin Pop
6016 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
6017 74409b12 Iustin Pop
  """Hypervisor parameter validation.
6018 74409b12 Iustin Pop

6019 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
6020 74409b12 Iustin Pop
  used in both instance create and instance modify.
6021 74409b12 Iustin Pop

6022 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
6023 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
6024 74409b12 Iustin Pop
  @type nodenames: list
6025 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
6026 74409b12 Iustin Pop
  @type hvname: string
6027 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
6028 74409b12 Iustin Pop
  @type hvparams: dict
6029 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
6030 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
6031 74409b12 Iustin Pop

6032 74409b12 Iustin Pop
  """
6033 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
6034 74409b12 Iustin Pop
                                                  hvname,
6035 74409b12 Iustin Pop
                                                  hvparams)
6036 74409b12 Iustin Pop
  for node in nodenames:
6037 781de953 Iustin Pop
    info = hvinfo[node]
6038 68c6f21c Iustin Pop
    if info.offline:
6039 68c6f21c Iustin Pop
      continue
6040 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
6041 74409b12 Iustin Pop
6042 74409b12 Iustin Pop
6043 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
6044 a8083063 Iustin Pop
  """Create an instance.
6045 a8083063 Iustin Pop

6046 a8083063 Iustin Pop
  """
6047 a8083063 Iustin Pop
  HPATH = "instance-add"
6048 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6049 f276c4b5 Iustin Pop
  _OP_REQP = ["instance_name", "disks",
6050 08db7c5c Iustin Pop
              "mode", "start",
6051 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
6052 338e51e8 Iustin Pop
              "hvparams", "beparams"]
6053 7baf741d Guido Trotter
  REQ_BGL = False
6054 7baf741d Guido Trotter
6055 5f23e043 Iustin Pop
  def CheckArguments(self):
6056 5f23e043 Iustin Pop
    """Check arguments.
6057 5f23e043 Iustin Pop

6058 5f23e043 Iustin Pop
    """
6059 df4272e5 Iustin Pop
    # set optional parameters to none if they don't exist
6060 f276c4b5 Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor",
6061 e588764d Iustin Pop
                 "disk_template", "identify_defaults"]:
6062 df4272e5 Iustin Pop
      if not hasattr(self.op, attr):
6063 df4272e5 Iustin Pop
        setattr(self.op, attr, None)
6064 df4272e5 Iustin Pop
6065 5f23e043 Iustin Pop
    # do not require name_check to ease forward/backward compatibility
6066 5f23e043 Iustin Pop
    # for tools
6067 5f23e043 Iustin Pop
    if not hasattr(self.op, "name_check"):
6068 5f23e043 Iustin Pop
      self.op.name_check = True
6069 25a8792c Iustin Pop
    if not hasattr(self.op, "no_install"):
6070 25a8792c Iustin Pop
      self.op.no_install = False
6071 25a8792c Iustin Pop
    if self.op.no_install and self.op.start:
6072 25a8792c Iustin Pop
      self.LogInfo("No-installation mode selected, disabling startup")
6073 25a8792c Iustin Pop
      self.op.start = False
6074 44caf5a8 Iustin Pop
    # validate/normalize the instance name
6075 44caf5a8 Iustin Pop
    self.op.instance_name = utils.HostInfo.NormalizeName(self.op.instance_name)
6076 5f23e043 Iustin Pop
    if self.op.ip_check and not self.op.name_check:
6077 5f23e043 Iustin Pop
      # TODO: make the ip check more flexible and not depend on the name check
6078 5f23e043 Iustin Pop
      raise errors.OpPrereqError("Cannot do ip checks without a name check",
6079 5f23e043 Iustin Pop
                                 errors.ECODE_INVAL)
6080 c3589cf8 Iustin Pop
    # check disk information: either all adopt, or no adopt
6081 c3589cf8 Iustin Pop
    has_adopt = has_no_adopt = False
6082 c3589cf8 Iustin Pop
    for disk in self.op.disks:
6083 c3589cf8 Iustin Pop
      if "adopt" in disk:
6084 c3589cf8 Iustin Pop
        has_adopt = True
6085 c3589cf8 Iustin Pop
      else:
6086 c3589cf8 Iustin Pop
        has_no_adopt = True
6087 c3589cf8 Iustin Pop
    if has_adopt and has_no_adopt:
6088 417eabe2 Iustin Pop
      raise errors.OpPrereqError("Either all disks are adopted or none is",
6089 c3589cf8 Iustin Pop
                                 errors.ECODE_INVAL)
6090 c3589cf8 Iustin Pop
    if has_adopt:
6091 c3589cf8 Iustin Pop
      if self.op.disk_template != constants.DT_PLAIN:
6092 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption is only supported for the"
6093 c3589cf8 Iustin Pop
                                   " 'plain' disk template",
6094 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6095 c3589cf8 Iustin Pop
      if self.op.iallocator is not None:
6096 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption not allowed with an"
6097 c3589cf8 Iustin Pop
                                   " iallocator script", errors.ECODE_INVAL)
6098 c3589cf8 Iustin Pop
      if self.op.mode == constants.INSTANCE_IMPORT:
6099 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Disk adoption not allowed for"
6100 c3589cf8 Iustin Pop
                                   " instance import", errors.ECODE_INVAL)
6101 c3589cf8 Iustin Pop
6102 c3589cf8 Iustin Pop
    self.adopt_disks = has_adopt
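    # For reference, a disk specification requesting adoption would look
    # roughly like (hypothetical LV name):
    #   disks=[{"size": 1024, "adopt": "existing_lv_name"}]
    # while a normal specification simply omits the "adopt" key.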
6103 5f23e043 Iustin Pop
6104 417eabe2 Iustin Pop
    # verify creation mode
6105 417eabe2 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
6106 417eabe2 Iustin Pop
                            constants.INSTANCE_IMPORT):
6107 417eabe2 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
6108 417eabe2 Iustin Pop
                                 self.op.mode, errors.ECODE_INVAL)
6109 417eabe2 Iustin Pop
6110 417eabe2 Iustin Pop
    # instance name verification
6111 417eabe2 Iustin Pop
    if self.op.name_check:
6112 417eabe2 Iustin Pop
      self.hostname1 = utils.GetHostInfo(self.op.instance_name)
6113 417eabe2 Iustin Pop
      self.op.instance_name = self.hostname1.name
6114 417eabe2 Iustin Pop
      # used in CheckPrereq for ip ping check
6115 417eabe2 Iustin Pop
      self.check_ip = self.hostname1.ip
6116 417eabe2 Iustin Pop
    else:
6117 417eabe2 Iustin Pop
      self.check_ip = None
6118 417eabe2 Iustin Pop
6119 417eabe2 Iustin Pop
    # file storage checks
6120 417eabe2 Iustin Pop
    if (self.op.file_driver and
6121 417eabe2 Iustin Pop
        not self.op.file_driver in constants.FILE_DRIVER):
6122 417eabe2 Iustin Pop
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
6123 417eabe2 Iustin Pop
                                 self.op.file_driver, errors.ECODE_INVAL)
6124 417eabe2 Iustin Pop
6125 417eabe2 Iustin Pop
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
6126 417eabe2 Iustin Pop
      raise errors.OpPrereqError("File storage directory path not absolute",
6127 417eabe2 Iustin Pop
                                 errors.ECODE_INVAL)
6128 417eabe2 Iustin Pop
6129 417eabe2 Iustin Pop
    ### Node/iallocator related checks
6130 417eabe2 Iustin Pop
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
6131 417eabe2 Iustin Pop
      raise errors.OpPrereqError("One and only one of iallocator and primary"
6132 417eabe2 Iustin Pop
                                 " node must be given",
6133 417eabe2 Iustin Pop
                                 errors.ECODE_INVAL)
6134 417eabe2 Iustin Pop
6135 417eabe2 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6136 417eabe2 Iustin Pop
      # On import force_variant must be True, because if we forced it at
6137 417eabe2 Iustin Pop
      # initial install, our only chance when importing it back is that it
6138 417eabe2 Iustin Pop
      # works again!
6139 417eabe2 Iustin Pop
      self.op.force_variant = True
6140 417eabe2 Iustin Pop
6141 417eabe2 Iustin Pop
      if self.op.no_install:
6142 417eabe2 Iustin Pop
        self.LogInfo("No-installation mode has no effect during import")
6143 417eabe2 Iustin Pop
6144 417eabe2 Iustin Pop
    else: # INSTANCE_CREATE
6145 417eabe2 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
6146 417eabe2 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified",
6147 417eabe2 Iustin Pop
                                   errors.ECODE_INVAL)
6148 417eabe2 Iustin Pop
      self.op.force_variant = getattr(self.op, "force_variant", False)
6149 f276c4b5 Iustin Pop
      if self.op.disk_template is None:
6150 f276c4b5 Iustin Pop
        raise errors.OpPrereqError("No disk template specified",
6151 f276c4b5 Iustin Pop
                                   errors.ECODE_INVAL)
6152 417eabe2 Iustin Pop
6153 7baf741d Guido Trotter
  def ExpandNames(self):
6154 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
6155 7baf741d Guido Trotter

6156 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
6157 7baf741d Guido Trotter

6158 7baf741d Guido Trotter
    """
6159 7baf741d Guido Trotter
    self.needed_locks = {}
6160 7baf741d Guido Trotter
6161 417eabe2 Iustin Pop
    instance_name = self.op.instance_name
6162 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
6163 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
6164 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
6165 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
6166 5c983ee5 Iustin Pop
                                 instance_name, errors.ECODE_EXISTS)
6167 7baf741d Guido Trotter
6168 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
6169 7baf741d Guido Trotter
6170 7baf741d Guido Trotter
    if self.op.iallocator:
6171 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6172 7baf741d Guido Trotter
    else:
6173 cf26a87a Iustin Pop
      self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
6174 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
6175 7baf741d Guido Trotter
      if self.op.snode is not None:
6176 cf26a87a Iustin Pop
        self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
6177 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
6178 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
6179 7baf741d Guido Trotter
6180 7baf741d Guido Trotter
    # in case of import lock the source node too
6181 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
6182 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
6183 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
6184 7baf741d Guido Trotter
6185 b9322a9f Guido Trotter
      if src_path is None:
6186 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
6187 b9322a9f Guido Trotter
6188 b9322a9f Guido Trotter
      if src_node is None:
6189 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6190 b9322a9f Guido Trotter
        self.op.src_node = None
6191 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
6192 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
6193 5c983ee5 Iustin Pop
                                     " path requires a source node option.",
6194 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
6195 b9322a9f Guido Trotter
      else:
6196 cf26a87a Iustin Pop
        self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
6197 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
6198 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
6199 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
6200 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
6201 c4feafe8 Iustin Pop
            utils.PathJoin(constants.EXPORT_DIR, src_path)
6202 7baf741d Guido Trotter
6203 538475ca Iustin Pop
  def _RunAllocator(self):
6204 538475ca Iustin Pop
    """Run the allocator based on input opcode.
6205 538475ca Iustin Pop

6206 538475ca Iustin Pop
    """
6207 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
6208 923ddac0 Michael Hanselmann
    ial = IAllocator(self.cfg, self.rpc,
6209 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
6210 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
6211 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
6212 d1c2dd75 Iustin Pop
                     tags=[],
6213 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
6214 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
6215 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
6216 08db7c5c Iustin Pop
                     disks=self.disks,
6217 d1c2dd75 Iustin Pop
                     nics=nics,
6218 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
6219 29859cb7 Iustin Pop
                     )
6220 d1c2dd75 Iustin Pop
6221 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
6222 d1c2dd75 Iustin Pop
6223 d1c2dd75 Iustin Pop
    if not ial.success:
6224 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
6225 5c983ee5 Iustin Pop
                                 " iallocator '%s': %s" %
6226 5c983ee5 Iustin Pop
                                 (self.op.iallocator, ial.info),
6227 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
6228 680f0a89 Iustin Pop
    if len(ial.result) != ial.required_nodes:
6229 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
6230 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
6231 680f0a89 Iustin Pop
                                 (self.op.iallocator, len(ial.result),
6232 5c983ee5 Iustin Pop
                                  ial.required_nodes), errors.ECODE_FAULT)
6233 680f0a89 Iustin Pop
    self.op.pnode = ial.result[0]
6234 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
6235 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
6236 680f0a89 Iustin Pop
                 utils.CommaJoin(ial.result))
6237 27579978 Iustin Pop
    if ial.required_nodes == 2:
6238 680f0a89 Iustin Pop
      self.op.snode = ial.result[1]
6239 538475ca Iustin Pop
6240 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6241 a8083063 Iustin Pop
    """Build hooks env.
6242 a8083063 Iustin Pop

6243 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
6244 a8083063 Iustin Pop

6245 a8083063 Iustin Pop
    """
6246 a8083063 Iustin Pop
    env = {
6247 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
6248 a8083063 Iustin Pop
      }
6249 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6250 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
6251 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
6252 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
6253 396e1b78 Michael Hanselmann
6254 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
6255 2c2690c9 Iustin Pop
      name=self.op.instance_name,
6256 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
6257 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
6258 4978db17 Iustin Pop
      status=self.op.start,
6259 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
6260 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
6261 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
6262 f9b10246 Guido Trotter
      nics=_NICListToTuple(self, self.nics),
6263 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
6264 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
6265 67fc3042 Iustin Pop
      bep=self.be_full,
6266 67fc3042 Iustin Pop
      hvp=self.hv_full,
6267 3df6e710 Iustin Pop
      hypervisor_name=self.op.hypervisor,
6268 396e1b78 Michael Hanselmann
    ))
6269 a8083063 Iustin Pop
6270 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
6271 a8083063 Iustin Pop
          self.secondaries)
6272 a8083063 Iustin Pop
    return env, nl, nl
6273 a8083063 Iustin Pop
6274 c1c31426 Iustin Pop
  def _ReadExportInfo(self):
6275 c1c31426 Iustin Pop
    """Reads the export information from disk.
6276 c1c31426 Iustin Pop

6277 c1c31426 Iustin Pop
    It will override the opcode source node and path with the actual
6278 c1c31426 Iustin Pop
    information, if these two were not specified before.
6279 c1c31426 Iustin Pop

6280 c1c31426 Iustin Pop
    @return: the export information
6281 c1c31426 Iustin Pop

6282 c1c31426 Iustin Pop
    """
6283 c1c31426 Iustin Pop
    assert self.op.mode == constants.INSTANCE_IMPORT
6284 c1c31426 Iustin Pop
6285 c1c31426 Iustin Pop
    src_node = self.op.src_node
6286 c1c31426 Iustin Pop
    src_path = self.op.src_path
6287 c1c31426 Iustin Pop
6288 c1c31426 Iustin Pop
    if src_node is None:
6289 c1c31426 Iustin Pop
      locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6290 c1c31426 Iustin Pop
      exp_list = self.rpc.call_export_list(locked_nodes)
6291 c1c31426 Iustin Pop
      found = False
6292 c1c31426 Iustin Pop
      for node in exp_list:
6293 c1c31426 Iustin Pop
        if exp_list[node].fail_msg:
6294 c1c31426 Iustin Pop
          continue
6295 c1c31426 Iustin Pop
        if src_path in exp_list[node].payload:
6296 c1c31426 Iustin Pop
          found = True
6297 c1c31426 Iustin Pop
          self.op.src_node = src_node = node
6298 c1c31426 Iustin Pop
          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
6299 c1c31426 Iustin Pop
                                                       src_path)
6300 c1c31426 Iustin Pop
          break
6301 c1c31426 Iustin Pop
      if not found:
6302 c1c31426 Iustin Pop
        raise errors.OpPrereqError("No export found for relative path %s" %
6303 c1c31426 Iustin Pop
                                    src_path, errors.ECODE_INVAL)
6304 c1c31426 Iustin Pop
6305 c1c31426 Iustin Pop
    _CheckNodeOnline(self, src_node)
6306 c1c31426 Iustin Pop
    result = self.rpc.call_export_info(src_node, src_path)
6307 c1c31426 Iustin Pop
    result.Raise("No export or invalid export found in dir %s" % src_path)
6308 c1c31426 Iustin Pop
6309 c1c31426 Iustin Pop
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
6310 c1c31426 Iustin Pop
    if not export_info.has_section(constants.INISECT_EXP):
6311 c1c31426 Iustin Pop
      raise errors.ProgrammerError("Corrupted export config",
6312 c1c31426 Iustin Pop
                                   errors.ECODE_ENVIRON)
6313 c1c31426 Iustin Pop
6314 c1c31426 Iustin Pop
    ei_version = export_info.get(constants.INISECT_EXP, "version")
6315 c1c31426 Iustin Pop
    if int(ei_version) != constants.EXPORT_VERSION:
6316 c1c31426 Iustin Pop
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
6317 c1c31426 Iustin Pop
                                 (ei_version, constants.EXPORT_VERSION),
6318 c1c31426 Iustin Pop
                                 errors.ECODE_ENVIRON)
6319 c1c31426 Iustin Pop
    return export_info
6320 a8083063 Iustin Pop
6321 f276c4b5 Iustin Pop
  def _ReadExportParams(self, einfo):
6322 f276c4b5 Iustin Pop
    """Use export parameters as defaults.
6323 f276c4b5 Iustin Pop

6324 f276c4b5 Iustin Pop
    In case the opcode doesn't specify (as in override) some instance
6325 f276c4b5 Iustin Pop
    parameters, then try to use them from the export information, if
6326 f276c4b5 Iustin Pop
    that declares them.
6327 f276c4b5 Iustin Pop

6328 f276c4b5 Iustin Pop
    """
6329 b6cd72b2 Iustin Pop
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
6330 b6cd72b2 Iustin Pop
6331 f276c4b5 Iustin Pop
    if self.op.disk_template is None:
6332 f276c4b5 Iustin Pop
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
6333 f276c4b5 Iustin Pop
        self.op.disk_template = einfo.get(constants.INISECT_INS,
6334 f276c4b5 Iustin Pop
                                          "disk_template")
6335 f276c4b5 Iustin Pop
      else:
6336 f276c4b5 Iustin Pop
        raise errors.OpPrereqError("No disk template specified and the export"
6337 f276c4b5 Iustin Pop
                                   " is missing the disk_template information",
6338 f276c4b5 Iustin Pop
                                   errors.ECODE_INVAL)
6339 f276c4b5 Iustin Pop
6340 9b12ed0f Iustin Pop
    if not self.op.disks:
6341 9b12ed0f Iustin Pop
      if einfo.has_option(constants.INISECT_INS, "disk_count"):
6342 9b12ed0f Iustin Pop
        disks = []
6343 9b12ed0f Iustin Pop
        # TODO: import the disk iv_name too
6344 9b12ed0f Iustin Pop
        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
6345 9b12ed0f Iustin Pop
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
6346 9b12ed0f Iustin Pop
          disks.append({"size": disk_sz})
6347 9b12ed0f Iustin Pop
        self.op.disks = disks
6348 9b12ed0f Iustin Pop
      else:
6349 9b12ed0f Iustin Pop
        raise errors.OpPrereqError("No disk info specified and the export"
6350 9b12ed0f Iustin Pop
                                   " is missing the disk information",
6351 9b12ed0f Iustin Pop
                                   errors.ECODE_INVAL)
6352 9b12ed0f Iustin Pop
6353 0af0f641 Iustin Pop
    if (not self.op.nics and
6354 0af0f641 Iustin Pop
        einfo.has_option(constants.INISECT_INS, "nic_count")):
6355 0af0f641 Iustin Pop
      nics = []
6356 0af0f641 Iustin Pop
      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
6357 0af0f641 Iustin Pop
        ndict = {}
6358 0af0f641 Iustin Pop
        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
6359 0af0f641 Iustin Pop
          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
6360 0af0f641 Iustin Pop
          ndict[name] = v
6361 0af0f641 Iustin Pop
        nics.append(ndict)
6362 0af0f641 Iustin Pop
      self.op.nics = nics
6363 0af0f641 Iustin Pop
6364 9f88b0e8 Iustin Pop
    if (self.op.hypervisor is None and
6365 9f88b0e8 Iustin Pop
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
6366 9f88b0e8 Iustin Pop
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
6367 9f88b0e8 Iustin Pop
    if einfo.has_section(constants.INISECT_HYP):
6368 9f88b0e8 Iustin Pop
      # use the export parameters but do not override the ones
6369 9f88b0e8 Iustin Pop
      # specified by the user
6370 9f88b0e8 Iustin Pop
      for name, value in einfo.items(constants.INISECT_HYP):
6371 9f88b0e8 Iustin Pop
        if name not in self.op.hvparams:
6372 9f88b0e8 Iustin Pop
          self.op.hvparams[name] = value
6373 9f88b0e8 Iustin Pop
6374 cc0d88e9 Iustin Pop
    if einfo.has_section(constants.INISECT_BEP):
6375 cc0d88e9 Iustin Pop
      # use the parameters, without overriding
6376 cc0d88e9 Iustin Pop
      for name, value in einfo.items(constants.INISECT_BEP):
6377 cc0d88e9 Iustin Pop
        if name not in self.op.beparams:
6378 cc0d88e9 Iustin Pop
          self.op.beparams[name] = value
6379 cc0d88e9 Iustin Pop
    else:
6380 cc0d88e9 Iustin Pop
      # try to read the parameters old style, from the main section
6381 cc0d88e9 Iustin Pop
      for name in constants.BES_PARAMETERS:
6382 cc0d88e9 Iustin Pop
        if (name not in self.op.beparams and
6383 cc0d88e9 Iustin Pop
            einfo.has_option(constants.INISECT_INS, name)):
6384 cc0d88e9 Iustin Pop
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
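  # A much abbreviated, hypothetical example of the export file parsed above
  # (section headers assume the usual values of the INISECT_* constants):
  #
  #   [export]
  #   version = 0
  #   os = debootstrap
  #
  #   [instance]
  #   disk_count = 1
  #   disk0_size = 1024
  #   nic_count = 1
  #   nic0_mac = aa:00:00:11:22:33
  #
  #   [hypervisor]
  #   root_path = /dev/vda1
  #
  #   [backend]
  #   memory = 512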
6385 cc0d88e9 Iustin Pop
6386 e588764d Iustin Pop
  def _RevertToDefaults(self, cluster):
6387 e588764d Iustin Pop
    """Revert the instance parameters to the default values.
6388 e588764d Iustin Pop

6389 e588764d Iustin Pop
    """
6390 e588764d Iustin Pop
    # hvparams
6391 e588764d Iustin Pop
    hv_defs = cluster.GetHVDefaults(self.op.hypervisor, self.op.os_type)
6392 e588764d Iustin Pop
    for name in self.op.hvparams.keys():
6393 e588764d Iustin Pop
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
6394 e588764d Iustin Pop
        del self.op.hvparams[name]
6395 e588764d Iustin Pop
    # beparams
6396 e588764d Iustin Pop
    be_defs = cluster.beparams.get(constants.PP_DEFAULT, {})
6397 e588764d Iustin Pop
    for name in self.op.beparams.keys():
6398 e588764d Iustin Pop
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
6399 e588764d Iustin Pop
        del self.op.beparams[name]
6400 e588764d Iustin Pop
    # nic params
6401 e588764d Iustin Pop
    nic_defs = cluster.nicparams.get(constants.PP_DEFAULT, {})
6402 e588764d Iustin Pop
    for nic in self.op.nics:
6403 e588764d Iustin Pop
      for name in constants.NICS_PARAMETERS:
6404 e588764d Iustin Pop
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
6405 e588764d Iustin Pop
          del nic[name]
6406 e588764d Iustin Pop
6407 a8083063 Iustin Pop
  def CheckPrereq(self):
6408 a8083063 Iustin Pop
    """Check prerequisites.
6409 a8083063 Iustin Pop

6410 a8083063 Iustin Pop
    """
6411 c1c31426 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6412 c1c31426 Iustin Pop
      export_info = self._ReadExportInfo()
6413 f276c4b5 Iustin Pop
      self._ReadExportParams(export_info)
6414 f276c4b5 Iustin Pop
6415 f276c4b5 Iustin Pop
    _CheckDiskTemplate(self.op.disk_template)
6416 c1c31426 Iustin Pop
6417 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
6418 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
6419 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
6420 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_STATE)
6421 eedc99de Manuel Franceschini
6422 22f50b1d Iustin Pop
    if self.op.hypervisor is None:
6423 22f50b1d Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
6424 22f50b1d Iustin Pop
6425 22f50b1d Iustin Pop
    cluster = self.cfg.GetClusterInfo()
6426 22f50b1d Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
6427 22f50b1d Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
6428 22f50b1d Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
6429 22f50b1d Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
6430 22f50b1d Iustin Pop
                                  ",".join(enabled_hvs)),
6431 22f50b1d Iustin Pop
                                 errors.ECODE_STATE)
6432 22f50b1d Iustin Pop
6433 22f50b1d Iustin Pop
    # check hypervisor parameter syntax (locally)
6434 22f50b1d Iustin Pop
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
6435 b6cd72b2 Iustin Pop
    filled_hvp = objects.FillDict(cluster.GetHVDefaults(self.op.hypervisor,
6436 b6cd72b2 Iustin Pop
                                                        self.op.os_type),
6437 22f50b1d Iustin Pop
                                  self.op.hvparams)
6438 22f50b1d Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
6439 22f50b1d Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
6440 22f50b1d Iustin Pop
    self.hv_full = filled_hvp
6441 22f50b1d Iustin Pop
    # check that we don't specify global parameters on an instance
6442 22f50b1d Iustin Pop
    _CheckGlobalHvParams(self.op.hvparams)
6443 22f50b1d Iustin Pop
6444 22f50b1d Iustin Pop
    # fill and remember the beparams dict
6445 22f50b1d Iustin Pop
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
6446 22f50b1d Iustin Pop
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
6447 22f50b1d Iustin Pop
                                    self.op.beparams)
6448 22f50b1d Iustin Pop
6449 e588764d Iustin Pop
    # now that hvp/bep are in final format, let's reset to defaults,
6450 e588764d Iustin Pop
    # if told to do so
6451 e588764d Iustin Pop
    if self.op.identify_defaults:
6452 e588764d Iustin Pop
      self._RevertToDefaults(cluster)
6453 e588764d Iustin Pop
6454 22f50b1d Iustin Pop
    # NIC buildup
6455 22f50b1d Iustin Pop
    self.nics = []
6456 22f50b1d Iustin Pop
    for idx, nic in enumerate(self.op.nics):
6457 22f50b1d Iustin Pop
      nic_mode_req = nic.get("mode", None)
6458 22f50b1d Iustin Pop
      nic_mode = nic_mode_req
6459 22f50b1d Iustin Pop
      if nic_mode is None:
6460 22f50b1d Iustin Pop
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
6461 22f50b1d Iustin Pop
6462 22f50b1d Iustin Pop
      # in routed mode, for the first nic, the default ip is 'auto'
6463 22f50b1d Iustin Pop
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
6464 22f50b1d Iustin Pop
        default_ip_mode = constants.VALUE_AUTO
6465 22f50b1d Iustin Pop
      else:
6466 22f50b1d Iustin Pop
        default_ip_mode = constants.VALUE_NONE
6467 22f50b1d Iustin Pop
6468 22f50b1d Iustin Pop
      # ip validity checks
6469 22f50b1d Iustin Pop
      ip = nic.get("ip", default_ip_mode)
6470 22f50b1d Iustin Pop
      if ip is None or ip.lower() == constants.VALUE_NONE:
6471 22f50b1d Iustin Pop
        nic_ip = None
6472 22f50b1d Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
6473 22f50b1d Iustin Pop
        if not self.op.name_check:
6474 22f50b1d Iustin Pop
          raise errors.OpPrereqError("IP address set to auto but name checks"
6475 22f50b1d Iustin Pop
                                     " have been skipped. Aborting.",
6476 22f50b1d Iustin Pop
                                     errors.ECODE_INVAL)
6477 22f50b1d Iustin Pop
        nic_ip = self.hostname1.ip
6478 22f50b1d Iustin Pop
      else:
6479 22f50b1d Iustin Pop
        if not utils.IsValidIP(ip):
6480 22f50b1d Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
6481 22f50b1d Iustin Pop
                                     " like a valid IP" % ip,
6482 22f50b1d Iustin Pop
                                     errors.ECODE_INVAL)
6483 22f50b1d Iustin Pop
        nic_ip = ip
6484 22f50b1d Iustin Pop
6485 22f50b1d Iustin Pop
      # TODO: check the ip address for uniqueness
6486 22f50b1d Iustin Pop
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
6487 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Routed nic mode requires an ip address",
6488 22f50b1d Iustin Pop
                                   errors.ECODE_INVAL)
6489 22f50b1d Iustin Pop
6490 22f50b1d Iustin Pop
      # MAC address verification
6491 22f50b1d Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
6492 22f50b1d Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6493 22f50b1d Iustin Pop
        mac = utils.NormalizeAndValidateMac(mac)
6494 22f50b1d Iustin Pop
6495 22f50b1d Iustin Pop
        try:
6496 22f50b1d Iustin Pop
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
6497 22f50b1d Iustin Pop
        except errors.ReservationError:
6498 22f50b1d Iustin Pop
          raise errors.OpPrereqError("MAC address %s already in use"
6499 22f50b1d Iustin Pop
                                     " in cluster" % mac,
6500 22f50b1d Iustin Pop
                                     errors.ECODE_NOTUNIQUE)
6501 22f50b1d Iustin Pop
6502 22f50b1d Iustin Pop
      # bridge verification
6503 22f50b1d Iustin Pop
      bridge = nic.get("bridge", None)
6504 22f50b1d Iustin Pop
      link = nic.get("link", None)
6505 22f50b1d Iustin Pop
      if bridge and link:
6506 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
6507 22f50b1d Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
6508 22f50b1d Iustin Pop
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
6509 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
6510 22f50b1d Iustin Pop
                                   errors.ECODE_INVAL)
6511 22f50b1d Iustin Pop
      elif bridge:
6512 22f50b1d Iustin Pop
        link = bridge
6513 22f50b1d Iustin Pop
6514 22f50b1d Iustin Pop
      nicparams = {}
6515 22f50b1d Iustin Pop
      if nic_mode_req:
6516 22f50b1d Iustin Pop
        nicparams[constants.NIC_MODE] = nic_mode_req
6517 22f50b1d Iustin Pop
      if link:
6518 22f50b1d Iustin Pop
        nicparams[constants.NIC_LINK] = link
6519 22f50b1d Iustin Pop
6520 22f50b1d Iustin Pop
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
6521 22f50b1d Iustin Pop
                                      nicparams)
6522 22f50b1d Iustin Pop
      objects.NIC.CheckParameterSyntax(check_params)
6523 22f50b1d Iustin Pop
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
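As context for the loop above: the per-NIC parameters are layered on top of the cluster-wide defaults before the syntax check. A minimal sketch of that fill-from-defaults pattern, using a hypothetical _fill_dict helper and made-up values rather than the real objects.FillDict and constants:

def _fill_dict(defaults, custom):
  # Hypothetical stand-in for objects.FillDict: start from the defaults
  # and let explicitly given values override them.
  result = defaults.copy()
  result.update(custom)
  return result

cluster_nic_defaults = {"mode": "bridged", "link": "xen-br0"}
requested = {"mode": "routed"}          # only "mode" given explicitly
effective = _fill_dict(cluster_nic_defaults, requested)
# effective == {"mode": "routed", "link": "xen-br0"}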
6524 22f50b1d Iustin Pop
6525 22f50b1d Iustin Pop
    # disk checks/pre-build
6526 22f50b1d Iustin Pop
    self.disks = []
6527 22f50b1d Iustin Pop
    for disk in self.op.disks:
6528 22f50b1d Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
6529 22f50b1d Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
6530 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
6531 22f50b1d Iustin Pop
                                   mode, errors.ECODE_INVAL)
6532 22f50b1d Iustin Pop
      size = disk.get("size", None)
6533 22f50b1d Iustin Pop
      if size is None:
6534 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
6535 22f50b1d Iustin Pop
      try:
6536 22f50b1d Iustin Pop
        size = int(size)
6537 22f50b1d Iustin Pop
      except (TypeError, ValueError):
6538 22f50b1d Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
6539 22f50b1d Iustin Pop
                                   errors.ECODE_INVAL)
6540 22f50b1d Iustin Pop
      new_disk = {"size": size, "mode": mode}
6541 22f50b1d Iustin Pop
      if "adopt" in disk:
6542 22f50b1d Iustin Pop
        new_disk["adopt"] = disk["adopt"]
6543 22f50b1d Iustin Pop
      self.disks.append(new_disk)
6544 22f50b1d Iustin Pop
6545 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
6546 a8083063 Iustin Pop
6547 09acf207 Guido Trotter
      # Check that the new instance doesn't have less disks than the export
6548 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
6549 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
6550 09acf207 Guido Trotter
      if instance_disks < export_disks:
6551 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
6552 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
6553 5c983ee5 Iustin Pop
                                   (instance_disks, export_disks),
6554 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
6555 a8083063 Iustin Pop
6556 09acf207 Guido Trotter
      disk_images = []
6557 09acf207 Guido Trotter
      for idx in range(export_disks):
6558 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
6559 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
6560 09acf207 Guido Trotter
          # FIXME: are the old OSes, disk sizes, etc. useful?
6561 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
6562 c1c31426 Iustin Pop
          image = utils.PathJoin(self.op.src_path, export_name)
6563 09acf207 Guido Trotter
          disk_images.append(image)
6564 09acf207 Guido Trotter
        else:
6565 09acf207 Guido Trotter
          disk_images.append(False)
6566 09acf207 Guido Trotter
6567 09acf207 Guido Trotter
      self.src_images = disk_images
6568 901a65c1 Iustin Pop
6569 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
6570 2a518543 Iustin Pop
      try:
6571 2a518543 Iustin Pop
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
6572 2a518543 Iustin Pop
      except (TypeError, ValueError), err:
6573 2a518543 Iustin Pop
        raise errors.OpPrereqError("Invalid export file, nic_count is not"
6574 2a518543 Iustin Pop
                                   " an integer: %s" % str(err),
6575 2a518543 Iustin Pop
                                   errors.ECODE_STATE)
6576 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
6577 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
6578 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx + 1:
6579 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
6580 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
6581 bc89efc3 Guido Trotter
6582 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
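For reference, the export metadata queried in the import branch above behaves like a ConfigParser-style INI file with per-disk and per-NIC options. A small sketch with made-up values, assuming the section name behind constants.INISECT_INS is the instance section:

import ConfigParser  # Python 2, as used by the rest of this module

exp = ConfigParser.SafeConfigParser()
exp.add_section("instance")  # assumed value of constants.INISECT_INS
exp.set("instance", "name", "old-name.example.com")
exp.set("instance", "disk_count", "1")
exp.set("instance", "nic_count", "1")
exp.set("instance", "disk0_dump", "disk0.dump")
exp.set("instance", "nic0_mac", "aa:00:00:12:34:56")

assert exp.getint("instance", "disk_count") == 1
assert exp.has_option("instance", "disk0_dump")
assert exp.get("instance", "nic0_mac") == "aa:00:00:12:34:56"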
6583 901a65c1 Iustin Pop
6584 18c8f361 Iustin Pop
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
6585 901a65c1 Iustin Pop
    if self.op.ip_check:
6586 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
6587 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
6588 5c983ee5 Iustin Pop
                                   (self.check_ip, self.op.instance_name),
6589 5c983ee5 Iustin Pop
                                   errors.ECODE_NOTUNIQUE)
6590 901a65c1 Iustin Pop
6591 295728df Guido Trotter
    #### mac address generation
6592 295728df Guido Trotter
    # By generating here the mac address both the allocator and the hooks get
6593 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
6594 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
6595 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
6596 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
6597 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
6598 295728df Guido Trotter
    # creation job will fail.
6599 295728df Guido Trotter
    for nic in self.nics:
6600 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6601 36b66e6e Guido Trotter
        nic.mac = self.cfg.GenerateMAC(self.proc.GetECId())
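The reserve-then-generate logic above relies on tying temporary reservations to the execution context id, so that a job which never commits its instance releases its MACs implicitly. A simplified sketch of such a reservation pool (a toy class, not the real ConfigWriter API):

class MacReservations(object):
  """Toy reservation pool keyed by execution-context id."""

  def __init__(self, in_use):
    self._in_use = set(in_use)      # MACs already in the cluster config
    self._reserved = {}             # mac -> ec_id

  def Reserve(self, mac, ec_id):
    if mac in self._in_use or mac in self._reserved:
      raise ValueError("MAC %s already in use or reserved" % mac)
    self._reserved[mac] = ec_id

  def DropECReservations(self, ec_id):
    # Called when an execution context finishes without committing.
    for mac, owner in list(self._reserved.items()):
      if owner == ec_id:
        del self._reserved[mac]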
6602 295728df Guido Trotter
6603 538475ca Iustin Pop
    #### allocator run
6604 538475ca Iustin Pop
6605 538475ca Iustin Pop
    if self.op.iallocator is not None:
6606 538475ca Iustin Pop
      self._RunAllocator()
6607 0f1a06e3 Manuel Franceschini
6608 901a65c1 Iustin Pop
    #### node related checks
6609 901a65c1 Iustin Pop
6610 901a65c1 Iustin Pop
    # check primary node
6611 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
6612 7baf741d Guido Trotter
    assert self.pnode is not None, \
6613 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
6614 7527a8a4 Iustin Pop
    if pnode.offline:
6615 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
6616 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
6617 733a2b6a Iustin Pop
    if pnode.drained:
6618 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
6619 5c983ee5 Iustin Pop
                                 pnode.name, errors.ECODE_STATE)
6620 7527a8a4 Iustin Pop
6621 901a65c1 Iustin Pop
    self.secondaries = []
6622 901a65c1 Iustin Pop
6623 901a65c1 Iustin Pop
    # mirror node verification
6624 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
6625 7baf741d Guido Trotter
      if self.op.snode is None:
6626 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
6627 5c983ee5 Iustin Pop
                                   " a mirror node", errors.ECODE_INVAL)
6628 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
6629 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be the"
6630 5c983ee5 Iustin Pop
                                   " primary node.", errors.ECODE_INVAL)
6631 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
6632 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
6633 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
6634 a8083063 Iustin Pop
6635 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
6636 6785674e Iustin Pop
6637 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
6638 08db7c5c Iustin Pop
                                self.disks)
6639 ed1ebc60 Guido Trotter
6640 c3589cf8 Iustin Pop
    # Check lv size requirements, if not adopting
6641 c3589cf8 Iustin Pop
    if req_size is not None and not self.adopt_disks:
6642 701384a9 Iustin Pop
      _CheckNodesFreeDisk(self, nodenames, req_size)
6643 ed1ebc60 Guido Trotter
6644 c3589cf8 Iustin Pop
    if self.adopt_disks: # instead, we must check the adoption data
6645 c3589cf8 Iustin Pop
      all_lvs = set([i["adopt"] for i in self.disks])
6646 c3589cf8 Iustin Pop
      if len(all_lvs) != len(self.disks):
6647 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
6648 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6649 c3589cf8 Iustin Pop
      for lv_name in all_lvs:
6650 c3589cf8 Iustin Pop
        try:
6651 c3589cf8 Iustin Pop
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
6652 c3589cf8 Iustin Pop
        except errors.ReservationError:
6653 c3589cf8 Iustin Pop
          raise errors.OpPrereqError("LV named %s used by another instance" %
6654 c3589cf8 Iustin Pop
                                     lv_name, errors.ECODE_NOTUNIQUE)
6655 c3589cf8 Iustin Pop
6656 c3589cf8 Iustin Pop
      node_lvs = self.rpc.call_lv_list([pnode.name],
6657 c3589cf8 Iustin Pop
                                       self.cfg.GetVGName())[pnode.name]
6658 c3589cf8 Iustin Pop
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
6659 c3589cf8 Iustin Pop
      node_lvs = node_lvs.payload
6660 c3589cf8 Iustin Pop
      delta = all_lvs.difference(node_lvs.keys())
6661 c3589cf8 Iustin Pop
      if delta:
6662 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
6663 c3589cf8 Iustin Pop
                                   utils.CommaJoin(delta),
6664 c3589cf8 Iustin Pop
                                   errors.ECODE_INVAL)
6665 c3589cf8 Iustin Pop
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
6666 c3589cf8 Iustin Pop
      if online_lvs:
6667 c3589cf8 Iustin Pop
        raise errors.OpPrereqError("Online logical volumes found, cannot"
6668 c3589cf8 Iustin Pop
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
6669 c3589cf8 Iustin Pop
                                   errors.ECODE_STATE)
6670 c3589cf8 Iustin Pop
      # update the disk sizes based on what was found
6671 c3589cf8 Iustin Pop
      for dsk in self.disks:
6672 c3589cf8 Iustin Pop
        dsk["size"] = int(float(node_lvs[dsk["adopt"]][0]))
6673 c3589cf8 Iustin Pop
6674 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
6675 6785674e Iustin Pop
6676 231cd901 Iustin Pop
    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
6677 a8083063 Iustin Pop
6678 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
6679 a8083063 Iustin Pop
6680 49ce1563 Iustin Pop
    # memory check on primary node
6681 49ce1563 Iustin Pop
    if self.op.start:
6682 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
6683 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
6684 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
6685 338e51e8 Iustin Pop
                           self.op.hypervisor)
6686 49ce1563 Iustin Pop
6687 08896026 Iustin Pop
    self.dry_run_result = list(nodenames)
6688 08896026 Iustin Pop
6689 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6690 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
6691 a8083063 Iustin Pop

6692 a8083063 Iustin Pop
    """
6693 a8083063 Iustin Pop
    instance = self.op.instance_name
6694 a8083063 Iustin Pop
    pnode_name = self.pnode.name
6695 a8083063 Iustin Pop
6696 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
6697 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
6698 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
6699 2a6469d5 Alexander Schreiber
    else:
6700 2a6469d5 Alexander Schreiber
      network_port = None
6701 58acb49d Alexander Schreiber
6702 0e3baaf3 Iustin Pop
    if constants.ENABLE_FILE_STORAGE:
6703 0e3baaf3 Iustin Pop
      # this is needed because os.path.join does not accept None arguments
6704 0e3baaf3 Iustin Pop
      if self.op.file_storage_dir is None:
6705 0e3baaf3 Iustin Pop
        string_file_storage_dir = ""
6706 0e3baaf3 Iustin Pop
      else:
6707 0e3baaf3 Iustin Pop
        string_file_storage_dir = self.op.file_storage_dir
6708 31a853d2 Iustin Pop
6709 0e3baaf3 Iustin Pop
      # build the full file storage dir path
6710 0e3baaf3 Iustin Pop
      file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
6711 0e3baaf3 Iustin Pop
                                        string_file_storage_dir, instance)
6712 2c313123 Manuel Franceschini
    else:
6713 0e3baaf3 Iustin Pop
      file_storage_dir = ""
6714 0f1a06e3 Manuel Franceschini
6715 0f1a06e3 Manuel Franceschini
6716 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
6717 a8083063 Iustin Pop
                                  self.op.disk_template,
6718 a8083063 Iustin Pop
                                  instance, pnode_name,
6719 08db7c5c Iustin Pop
                                  self.secondaries,
6720 08db7c5c Iustin Pop
                                  self.disks,
6721 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
6722 e2a65344 Iustin Pop
                                  self.op.file_driver,
6723 e2a65344 Iustin Pop
                                  0)
6724 a8083063 Iustin Pop
6725 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
6726 a8083063 Iustin Pop
                            primary_node=pnode_name,
6727 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
6728 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
6729 4978db17 Iustin Pop
                            admin_up=False,
6730 58acb49d Alexander Schreiber
                            network_port=network_port,
6731 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
6732 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
6733 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
6734 a8083063 Iustin Pop
                            )
6735 a8083063 Iustin Pop
6736 c3589cf8 Iustin Pop
    if self.adopt_disks:
6737 c3589cf8 Iustin Pop
      # rename LVs to the newly-generated names; we need to construct
6738 c3589cf8 Iustin Pop
      # 'fake' LV disks with the old data, plus the new unique_id
6739 c3589cf8 Iustin Pop
      tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
6740 c3589cf8 Iustin Pop
      rename_to = []
6741 c3589cf8 Iustin Pop
      for t_dsk, a_dsk in zip(tmp_disks, self.disks):
6742 c3589cf8 Iustin Pop
        rename_to.append(t_dsk.logical_id)
6743 c3589cf8 Iustin Pop
        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk["adopt"])
6744 c3589cf8 Iustin Pop
        self.cfg.SetDiskID(t_dsk, pnode_name)
6745 c3589cf8 Iustin Pop
      result = self.rpc.call_blockdev_rename(pnode_name,
6746 c3589cf8 Iustin Pop
                                             zip(tmp_disks, rename_to))
6747 c3589cf8 Iustin Pop
      result.Raise("Failed to rename adoped LVs")
6748 c3589cf8 Iustin Pop
    else:
6749 c3589cf8 Iustin Pop
      feedback_fn("* creating instance disks...")
6750 796cab27 Iustin Pop
      try:
6751 c3589cf8 Iustin Pop
        _CreateDisks(self, iobj)
6752 c3589cf8 Iustin Pop
      except errors.OpExecError:
6753 c3589cf8 Iustin Pop
        self.LogWarning("Device creation failed, reverting...")
6754 c3589cf8 Iustin Pop
        try:
6755 c3589cf8 Iustin Pop
          _RemoveDisks(self, iobj)
6756 c3589cf8 Iustin Pop
        finally:
6757 c3589cf8 Iustin Pop
          self.cfg.ReleaseDRBDMinors(instance)
6758 c3589cf8 Iustin Pop
          raise
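In the adoption branch above, each freshly generated LV name is mapped back onto the volume being adopted, and the node is then asked to rename the existing volume to the generated name. A sketch of the (old, new) pairs that effectively drive the rename, with made-up names (the real code passes Disk objects, not bare tuples):

vg = "xenvg"
generated = [(vg, "aabbccdd-disk0")]       # logical_ids from _GenerateDiskTemplate
adopted = [{"adopt": "existing-vol"}]      # what the user asked to adopt

rename_pairs = []
for new_id, a_dsk in zip(generated, adopted):
  old_id = (new_id[0], a_dsk["adopt"])     # same VG, pre-existing LV name
  rename_pairs.append((old_id, new_id))
# rename_pairs == [(("xenvg", "existing-vol"), ("xenvg", "aabbccdd-disk0"))]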
6759 a8083063 Iustin Pop
6760 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
6761 a8083063 Iustin Pop
6762 0debfb35 Guido Trotter
    self.cfg.AddInstance(iobj, self.proc.GetECId())
6763 0debfb35 Guido Trotter
6764 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
6765 7baf741d Guido Trotter
    # added the instance to the config
6766 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
6767 e36e96b4 Guido Trotter
    # Unlock all the nodes
6768 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
6769 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
6770 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
6771 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
6772 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
6773 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
6774 9c8971d7 Guido Trotter
    else:
6775 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
6776 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
6777 a8083063 Iustin Pop
6778 a8083063 Iustin Pop
    if self.op.wait_for_sync:
6779 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
6780 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
6781 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
6782 a8083063 Iustin Pop
      time.sleep(15)
6783 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
6784 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
6785 a8083063 Iustin Pop
    else:
6786 a8083063 Iustin Pop
      disk_abort = False
6787 a8083063 Iustin Pop
6788 a8083063 Iustin Pop
    if disk_abort:
6789 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
6790 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
6791 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
6792 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
6793 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
6794 3ecf6786 Iustin Pop
                               " this instance")
6795 a8083063 Iustin Pop
6796 c3589cf8 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
6797 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
6798 25a8792c Iustin Pop
        if not self.op.no_install:
6799 25a8792c Iustin Pop
          feedback_fn("* running the instance OS create scripts...")
6800 25a8792c Iustin Pop
          # FIXME: pass debug option from opcode to backend
6801 25a8792c Iustin Pop
          result = self.rpc.call_instance_os_add(pnode_name, iobj, False,
6802 25a8792c Iustin Pop
                                                 self.op.debug_level)
6803 25a8792c Iustin Pop
          result.Raise("Could not add os for instance %s"
6804 25a8792c Iustin Pop
                       " on node %s" % (instance, pnode_name))
6805 a8083063 Iustin Pop
6806 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
6807 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
6808 a8083063 Iustin Pop
        src_node = self.op.src_node
6809 09acf207 Guido Trotter
        src_images = self.src_images
6810 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
6811 4a0e011f Iustin Pop
        # FIXME: pass debug option from opcode to backend
6812 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
6813 09acf207 Guido Trotter
                                                         src_node, src_images,
6814 dd713605 Iustin Pop
                                                         cluster_name,
6815 dd713605 Iustin Pop
                                                         self.op.debug_level)
6816 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
6817 944bf548 Iustin Pop
        if msg:
6818 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
6819 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
6820 a8083063 Iustin Pop
      else:
6821 a8083063 Iustin Pop
        # also checked in the prereq part
6822 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
6823 3ecf6786 Iustin Pop
                                     % self.op.mode)
6824 a8083063 Iustin Pop
6825 a8083063 Iustin Pop
    if self.op.start:
6826 4978db17 Iustin Pop
      iobj.admin_up = True
6827 a4eae71f Michael Hanselmann
      self.cfg.Update(iobj, feedback_fn)
6828 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
6829 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
6830 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
6831 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
6832 a8083063 Iustin Pop
6833 08896026 Iustin Pop
    return list(iobj.all_nodes)
6834 08896026 Iustin Pop
6835 a8083063 Iustin Pop
6836 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
6837 a8083063 Iustin Pop
  """Connect to an instance's console.
6838 a8083063 Iustin Pop

6839 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
6840 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
6841 a8083063 Iustin Pop
  console.
6842 a8083063 Iustin Pop

6843 a8083063 Iustin Pop
  """
6844 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
6845 8659b73e Guido Trotter
  REQ_BGL = False
6846 8659b73e Guido Trotter
6847 8659b73e Guido Trotter
  def ExpandNames(self):
6848 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
6849 a8083063 Iustin Pop
6850 a8083063 Iustin Pop
  def CheckPrereq(self):
6851 a8083063 Iustin Pop
    """Check prerequisites.
6852 a8083063 Iustin Pop

6853 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
6854 a8083063 Iustin Pop

6855 a8083063 Iustin Pop
    """
6856 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6857 8659b73e Guido Trotter
    assert self.instance is not None, \
6858 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6859 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
6860 a8083063 Iustin Pop
6861 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6862 a8083063 Iustin Pop
    """Connect to the console of an instance
6863 a8083063 Iustin Pop

6864 a8083063 Iustin Pop
    """
6865 a8083063 Iustin Pop
    instance = self.instance
6866 a8083063 Iustin Pop
    node = instance.primary_node
6867 a8083063 Iustin Pop
6868 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
6869 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
6870 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
6871 a8083063 Iustin Pop
6872 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
6873 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
6874 a8083063 Iustin Pop
6875 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
6876 a8083063 Iustin Pop
6877 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
6878 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
6879 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
6880 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
6881 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
6882 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
6883 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
6884 b047857b Michael Hanselmann
6885 82122173 Iustin Pop
    # build ssh cmdline
6886 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
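So the opcode does not open the console itself; it only returns a command line that the client runs on the master node. Roughly, the result is an ssh invocation to the primary node that forces a tty and executes the hypervisor's console command, along the lines of the sketch below (illustration only, not the real ssh.BuildCmd):

def _example_console_argv(node, user, console_cmd):
  # Not the real ssh.BuildCmd: just the general shape of the returned command.
  return ["ssh", "-t", "-l", user, node, console_cmd]

# _example_console_argv("node1.example.com", "root", "xm console instance1")
# -> ["ssh", "-t", "-l", "root", "node1.example.com", "xm console instance1"]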
6887 a8083063 Iustin Pop
6888 a8083063 Iustin Pop
6889 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
6890 a8083063 Iustin Pop
  """Replace the disks of an instance.
6891 a8083063 Iustin Pop

6892 a8083063 Iustin Pop
  """
6893 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
6894 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6895 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
6896 efd990e4 Guido Trotter
  REQ_BGL = False
6897 efd990e4 Guido Trotter
6898 7e9366f7 Iustin Pop
  def CheckArguments(self):
6899 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
6900 efd990e4 Guido Trotter
      self.op.remote_node = None
6901 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
6902 7e9366f7 Iustin Pop
      self.op.iallocator = None
6903 7ea7bcf6 Iustin Pop
    if not hasattr(self.op, "early_release"):
6904 7ea7bcf6 Iustin Pop
      self.op.early_release = False
6905 7e9366f7 Iustin Pop
6906 c68174b6 Michael Hanselmann
    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
6907 c68174b6 Michael Hanselmann
                                  self.op.iallocator)
6908 7e9366f7 Iustin Pop
6909 7e9366f7 Iustin Pop
  def ExpandNames(self):
6910 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
6911 7e9366f7 Iustin Pop
6912 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
6913 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6914 2bb5c911 Michael Hanselmann
6915 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
6916 cf26a87a Iustin Pop
      remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6917 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
6918 2bb5c911 Michael Hanselmann
6919 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
6920 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
6921 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
6922 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
6923 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
6924 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
6925 2bb5c911 Michael Hanselmann
6926 efd990e4 Guido Trotter
    else:
6927 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
6928 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6929 efd990e4 Guido Trotter
6930 c68174b6 Michael Hanselmann
    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
6931 c68174b6 Michael Hanselmann
                                   self.op.iallocator, self.op.remote_node,
6932 7ea7bcf6 Iustin Pop
                                   self.op.disks, False, self.op.early_release)
6933 c68174b6 Michael Hanselmann
6934 3a012b41 Michael Hanselmann
    self.tasklets = [self.replacer]
6935 2bb5c911 Michael Hanselmann
6936 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
6937 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
6938 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
6939 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
6940 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
6941 efd990e4 Guido Trotter
      self._LockInstancesNodes()
6942 a8083063 Iustin Pop
6943 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6944 a8083063 Iustin Pop
    """Build hooks env.
6945 a8083063 Iustin Pop

6946 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
6947 a8083063 Iustin Pop

6948 a8083063 Iustin Pop
    """
6949 2bb5c911 Michael Hanselmann
    instance = self.replacer.instance
6950 a8083063 Iustin Pop
    env = {
6951 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
6952 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
6953 2bb5c911 Michael Hanselmann
      "OLD_SECONDARY": instance.secondary_nodes[0],
6954 a8083063 Iustin Pop
      }
6955 2bb5c911 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self, instance))
6956 0834c866 Iustin Pop
    nl = [
6957 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
6958 2bb5c911 Michael Hanselmann
      instance.primary_node,
6959 0834c866 Iustin Pop
      ]
6960 0834c866 Iustin Pop
    if self.op.remote_node is not None:
6961 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
6962 a8083063 Iustin Pop
    return env, nl, nl
6963 a8083063 Iustin Pop
6964 2bb5c911 Michael Hanselmann
6965 7ffc5a86 Michael Hanselmann
class LUEvacuateNode(LogicalUnit):
6966 7ffc5a86 Michael Hanselmann
  """Relocate the secondary instances from a node.
6967 7ffc5a86 Michael Hanselmann

6968 7ffc5a86 Michael Hanselmann
  """
6969 7ffc5a86 Michael Hanselmann
  HPATH = "node-evacuate"
6970 7ffc5a86 Michael Hanselmann
  HTYPE = constants.HTYPE_NODE
6971 7ffc5a86 Michael Hanselmann
  _OP_REQP = ["node_name"]
6972 7ffc5a86 Michael Hanselmann
  REQ_BGL = False
6973 7ffc5a86 Michael Hanselmann
6974 7ffc5a86 Michael Hanselmann
  def CheckArguments(self):
6975 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "remote_node"):
6976 7ffc5a86 Michael Hanselmann
      self.op.remote_node = None
6977 7ffc5a86 Michael Hanselmann
    if not hasattr(self.op, "iallocator"):
6978 7ffc5a86 Michael Hanselmann
      self.op.iallocator = None
6979 7ea7bcf6 Iustin Pop
    if not hasattr(self.op, "early_release"):
6980 7ea7bcf6 Iustin Pop
      self.op.early_release = False
6981 7ffc5a86 Michael Hanselmann
6982 7ffc5a86 Michael Hanselmann
    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
6983 7ffc5a86 Michael Hanselmann
                                  self.op.remote_node,
6984 7ffc5a86 Michael Hanselmann
                                  self.op.iallocator)
6985 7ffc5a86 Michael Hanselmann
6986 7ffc5a86 Michael Hanselmann
  def ExpandNames(self):
6987 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
6988 7ffc5a86 Michael Hanselmann
6989 7ffc5a86 Michael Hanselmann
    self.needed_locks = {}
6990 7ffc5a86 Michael Hanselmann
6991 7ffc5a86 Michael Hanselmann
    # Declare node locks
6992 7ffc5a86 Michael Hanselmann
    if self.op.iallocator is not None:
6993 7ffc5a86 Michael Hanselmann
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6994 7ffc5a86 Michael Hanselmann
6995 7ffc5a86 Michael Hanselmann
    elif self.op.remote_node is not None:
6996 cf26a87a Iustin Pop
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
6997 7ffc5a86 Michael Hanselmann
6998 7ffc5a86 Michael Hanselmann
      # Warning: do not remove the locking of the new secondary here
6999 7ffc5a86 Michael Hanselmann
      # unless DRBD8.AddChildren is changed to work in parallel;
7000 7ffc5a86 Michael Hanselmann
      # currently it doesn't since parallel invocations of
7001 7ffc5a86 Michael Hanselmann
      # FindUnusedMinor will conflict
7002 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
7003 7ffc5a86 Michael Hanselmann
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
7004 7ffc5a86 Michael Hanselmann
7005 7ffc5a86 Michael Hanselmann
    else:
7006 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid parameters", errors.ECODE_INVAL)
7007 7ffc5a86 Michael Hanselmann
7008 7ffc5a86 Michael Hanselmann
    # Create tasklets for replacing disks for all secondary instances on this
7009 7ffc5a86 Michael Hanselmann
    # node
7010 7ffc5a86 Michael Hanselmann
    names = []
7011 3a012b41 Michael Hanselmann
    tasklets = []
7012 7ffc5a86 Michael Hanselmann
7013 7ffc5a86 Michael Hanselmann
    for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
7014 7ffc5a86 Michael Hanselmann
      logging.debug("Replacing disks for instance %s", inst.name)
7015 7ffc5a86 Michael Hanselmann
      names.append(inst.name)
7016 7ffc5a86 Michael Hanselmann
7017 7ffc5a86 Michael Hanselmann
      replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
7018 94a1b377 Michael Hanselmann
                                self.op.iallocator, self.op.remote_node, [],
7019 7ea7bcf6 Iustin Pop
                                True, self.op.early_release)
7020 3a012b41 Michael Hanselmann
      tasklets.append(replacer)
7021 7ffc5a86 Michael Hanselmann
7022 3a012b41 Michael Hanselmann
    self.tasklets = tasklets
7023 7ffc5a86 Michael Hanselmann
    self.instance_names = names
7024 7ffc5a86 Michael Hanselmann
7025 7ffc5a86 Michael Hanselmann
    # Declare instance locks
7026 7ffc5a86 Michael Hanselmann
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names
7027 7ffc5a86 Michael Hanselmann
7028 7ffc5a86 Michael Hanselmann
  def DeclareLocks(self, level):
7029 7ffc5a86 Michael Hanselmann
    # If we're not already locking all nodes in the set we have to declare the
7030 7ffc5a86 Michael Hanselmann
    # instance's primary/secondary nodes.
7031 7ffc5a86 Michael Hanselmann
    if (level == locking.LEVEL_NODE and
7032 7ffc5a86 Michael Hanselmann
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
7033 7ffc5a86 Michael Hanselmann
      self._LockInstancesNodes()
7034 7ffc5a86 Michael Hanselmann
7035 7ffc5a86 Michael Hanselmann
  def BuildHooksEnv(self):
7036 7ffc5a86 Michael Hanselmann
    """Build hooks env.
7037 7ffc5a86 Michael Hanselmann

7038 7ffc5a86 Michael Hanselmann
    This runs on the master, the primary and all the secondaries.
7039 7ffc5a86 Michael Hanselmann

7040 7ffc5a86 Michael Hanselmann
    """
7041 7ffc5a86 Michael Hanselmann
    env = {
7042 7ffc5a86 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
7043 7ffc5a86 Michael Hanselmann
      }
7044 7ffc5a86 Michael Hanselmann
7045 7ffc5a86 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
7046 7ffc5a86 Michael Hanselmann
7047 7ffc5a86 Michael Hanselmann
    if self.op.remote_node is not None:
7048 7ffc5a86 Michael Hanselmann
      env["NEW_SECONDARY"] = self.op.remote_node
7049 7ffc5a86 Michael Hanselmann
      nl.append(self.op.remote_node)
7050 7ffc5a86 Michael Hanselmann
7051 7ffc5a86 Michael Hanselmann
    return (env, nl, nl)
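Both LUReplaceDisks and LUEvacuateNode delegate the actual work to TLReplaceDisks tasklets; the LUs only gather locks and build the list. Roughly, the surrounding machinery checks prerequisites for every tasklet before executing any of them, along the lines of this simplified sketch (not the real processor code):

def _run_tasklets(tasklets, feedback_fn):
  # Simplified: all prerequisites are verified before any tasklet runs.
  for tl in tasklets:
    tl.CheckPrereq()
  for tl in tasklets:
    tl.Exec(feedback_fn)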
7052 7ffc5a86 Michael Hanselmann
7053 7ffc5a86 Michael Hanselmann
7054 c68174b6 Michael Hanselmann
class TLReplaceDisks(Tasklet):
7055 2bb5c911 Michael Hanselmann
  """Replaces disks for an instance.
7056 2bb5c911 Michael Hanselmann

7057 2bb5c911 Michael Hanselmann
  Note: Locking is not within the scope of this class.
7058 2bb5c911 Michael Hanselmann

7059 2bb5c911 Michael Hanselmann
  """
7060 2bb5c911 Michael Hanselmann
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
7061 7ea7bcf6 Iustin Pop
               disks, delay_iallocator, early_release):
7062 2bb5c911 Michael Hanselmann
    """Initializes this class.
7063 2bb5c911 Michael Hanselmann

7064 2bb5c911 Michael Hanselmann
    """
7065 464243a7 Michael Hanselmann
    Tasklet.__init__(self, lu)
7066 464243a7 Michael Hanselmann
7067 2bb5c911 Michael Hanselmann
    # Parameters
7068 2bb5c911 Michael Hanselmann
    self.instance_name = instance_name
7069 2bb5c911 Michael Hanselmann
    self.mode = mode
7070 2bb5c911 Michael Hanselmann
    self.iallocator_name = iallocator_name
7071 2bb5c911 Michael Hanselmann
    self.remote_node = remote_node
7072 2bb5c911 Michael Hanselmann
    self.disks = disks
7073 94a1b377 Michael Hanselmann
    self.delay_iallocator = delay_iallocator
7074 7ea7bcf6 Iustin Pop
    self.early_release = early_release
7075 2bb5c911 Michael Hanselmann
7076 2bb5c911 Michael Hanselmann
    # Runtime data
7077 2bb5c911 Michael Hanselmann
    self.instance = None
7078 2bb5c911 Michael Hanselmann
    self.new_node = None
7079 2bb5c911 Michael Hanselmann
    self.target_node = None
7080 2bb5c911 Michael Hanselmann
    self.other_node = None
7081 2bb5c911 Michael Hanselmann
    self.remote_node_info = None
7082 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = None
7083 2bb5c911 Michael Hanselmann
7084 2bb5c911 Michael Hanselmann
  @staticmethod
7085 2bb5c911 Michael Hanselmann
  def CheckArguments(mode, remote_node, iallocator):
7086 c68174b6 Michael Hanselmann
    """Helper function for users of this class.
7087 c68174b6 Michael Hanselmann

7088 c68174b6 Michael Hanselmann
    """
7089 2bb5c911 Michael Hanselmann
    # check for valid parameter combination
7090 2bb5c911 Michael Hanselmann
    if mode == constants.REPLACE_DISK_CHG:
7091 02a00186 Michael Hanselmann
      if remote_node is None and iallocator is None:
7092 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("When changing the secondary either an"
7093 2bb5c911 Michael Hanselmann
                                   " iallocator script must be used or the"
7094 5c983ee5 Iustin Pop
                                   " new node given", errors.ECODE_INVAL)
7095 02a00186 Michael Hanselmann
7096 02a00186 Michael Hanselmann
      if remote_node is not None and iallocator is not None:
7097 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("Give either the iallocator or the new"
7098 5c983ee5 Iustin Pop
                                   " secondary, not both", errors.ECODE_INVAL)
7099 02a00186 Michael Hanselmann
7100 02a00186 Michael Hanselmann
    elif remote_node is not None or iallocator is not None:
7101 02a00186 Michael Hanselmann
      # Not replacing the secondary
7102 02a00186 Michael Hanselmann
      raise errors.OpPrereqError("The iallocator and new node options can"
7103 02a00186 Michael Hanselmann
                                 " only be used when changing the"
7104 5c983ee5 Iustin Pop
                                 " secondary node", errors.ECODE_INVAL)
7105 2bb5c911 Michael Hanselmann
7106 2bb5c911 Michael Hanselmann
  @staticmethod
7107 2bb5c911 Michael Hanselmann
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
7108 2bb5c911 Michael Hanselmann
    """Compute a new secondary node using an IAllocator.
7109 2bb5c911 Michael Hanselmann

7110 2bb5c911 Michael Hanselmann
    """
7111 2bb5c911 Michael Hanselmann
    ial = IAllocator(lu.cfg, lu.rpc,
7112 2bb5c911 Michael Hanselmann
                     mode=constants.IALLOCATOR_MODE_RELOC,
7113 2bb5c911 Michael Hanselmann
                     name=instance_name,
7114 2bb5c911 Michael Hanselmann
                     relocate_from=relocate_from)
7115 2bb5c911 Michael Hanselmann
7116 2bb5c911 Michael Hanselmann
    ial.Run(iallocator_name)
7117 2bb5c911 Michael Hanselmann
7118 2bb5c911 Michael Hanselmann
    if not ial.success:
7119 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
7120 5c983ee5 Iustin Pop
                                 " %s" % (iallocator_name, ial.info),
7121 5c983ee5 Iustin Pop
                                 errors.ECODE_NORES)
7122 2bb5c911 Michael Hanselmann
7123 680f0a89 Iustin Pop
    if len(ial.result) != ial.required_nodes:
7124 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
7125 2bb5c911 Michael Hanselmann
                                 " of nodes (%s), required %s" %
7126 d984846d Iustin Pop
                                 (iallocator_name,
7127 680f0a89 Iustin Pop
                                  len(ial.result), ial.required_nodes),
7128 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
7129 2bb5c911 Michael Hanselmann
7130 680f0a89 Iustin Pop
    remote_node_name = ial.result[0]
7131 2bb5c911 Michael Hanselmann
7132 2bb5c911 Michael Hanselmann
    lu.LogInfo("Selected new secondary for instance '%s': %s",
7133 2bb5c911 Michael Hanselmann
               instance_name, remote_node_name)
7134 2bb5c911 Michael Hanselmann
7135 2bb5c911 Michael Hanselmann
    return remote_node_name
7136 2bb5c911 Michael Hanselmann
7137 942be002 Michael Hanselmann
  def _FindFaultyDisks(self, node_name):
7138 2d9005d8 Michael Hanselmann
    return _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
7139 2d9005d8 Michael Hanselmann
                                    node_name, True)
7140 942be002 Michael Hanselmann
7141 2bb5c911 Michael Hanselmann
  def CheckPrereq(self):
7142 2bb5c911 Michael Hanselmann
    """Check prerequisites.
7143 2bb5c911 Michael Hanselmann

7144 2bb5c911 Michael Hanselmann
    This checks that the instance is in the cluster.
7145 2bb5c911 Michael Hanselmann

7146 2bb5c911 Michael Hanselmann
    """
7147 e9022531 Iustin Pop
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
7148 e9022531 Iustin Pop
    assert instance is not None, \
7149 20eca47d Iustin Pop
      "Cannot retrieve locked instance %s" % self.instance_name
7150 2bb5c911 Michael Hanselmann
7151 e9022531 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
7152 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
7153 5c983ee5 Iustin Pop
                                 " instances", errors.ECODE_INVAL)
7154 a8083063 Iustin Pop
7155 e9022531 Iustin Pop
    if len(instance.secondary_nodes) != 1:
7156 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
7157 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
7158 5c983ee5 Iustin Pop
                                 len(instance.secondary_nodes),
7159 5c983ee5 Iustin Pop
                                 errors.ECODE_FAULT)
7160 a8083063 Iustin Pop
7161 94a1b377 Michael Hanselmann
    if not self.delay_iallocator:
7162 94a1b377 Michael Hanselmann
      self._CheckPrereq2()
7163 94a1b377 Michael Hanselmann
7164 94a1b377 Michael Hanselmann
  def _CheckPrereq2(self):
7165 94a1b377 Michael Hanselmann
    """Check prerequisites, second part.
7166 94a1b377 Michael Hanselmann

7167 94a1b377 Michael Hanselmann
    This function should always be part of CheckPrereq. It was separated and is
7168 94a1b377 Michael Hanselmann
    now called from Exec because during node evacuation iallocator was only
7169 94a1b377 Michael Hanselmann
    called with an unmodified cluster model, not taking planned changes into
7170 94a1b377 Michael Hanselmann
    account.
7171 94a1b377 Michael Hanselmann

7172 94a1b377 Michael Hanselmann
    """
7173 94a1b377 Michael Hanselmann
    instance = self.instance
7174 e9022531 Iustin Pop
    secondary_node = instance.secondary_nodes[0]
7175 a9e0c397 Iustin Pop
7176 2bb5c911 Michael Hanselmann
    if self.iallocator_name is None:
7177 2bb5c911 Michael Hanselmann
      remote_node = self.remote_node
7178 2bb5c911 Michael Hanselmann
    else:
7179 2bb5c911 Michael Hanselmann
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
7180 e9022531 Iustin Pop
                                       instance.name, instance.secondary_nodes)
7181 b6e82a65 Iustin Pop
7182 a9e0c397 Iustin Pop
    if remote_node is not None:
7183 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
7184 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
7185 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
7186 a9e0c397 Iustin Pop
    else:
7187 a9e0c397 Iustin Pop
      self.remote_node_info = None
7188 2bb5c911 Michael Hanselmann
7189 2bb5c911 Michael Hanselmann
    if remote_node == self.instance.primary_node:
7190 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
7191 5c983ee5 Iustin Pop
                                 " the instance.", errors.ECODE_INVAL)
7192 2bb5c911 Michael Hanselmann
7193 2bb5c911 Michael Hanselmann
    if remote_node == secondary_node:
7194 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
7195 5c983ee5 Iustin Pop
                                 " secondary node of the instance.",
7196 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7197 7e9366f7 Iustin Pop
7198 2945fd2d Michael Hanselmann
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
7199 2945fd2d Michael Hanselmann
                                    constants.REPLACE_DISK_CHG):
7200 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
7201 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7202 942be002 Michael Hanselmann
7203 2945fd2d Michael Hanselmann
    if self.mode == constants.REPLACE_DISK_AUTO:
7204 e9022531 Iustin Pop
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
7205 942be002 Michael Hanselmann
      faulty_secondary = self._FindFaultyDisks(secondary_node)
7206 942be002 Michael Hanselmann
7207 942be002 Michael Hanselmann
      if faulty_primary and faulty_secondary:
7208 942be002 Michael Hanselmann
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
7209 942be002 Michael Hanselmann
                                   " one node and can not be repaired"
7210 5c983ee5 Iustin Pop
                                   " automatically" % self.instance_name,
7211 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
7212 942be002 Michael Hanselmann
7213 942be002 Michael Hanselmann
      if faulty_primary:
7214 942be002 Michael Hanselmann
        self.disks = faulty_primary
7215 e9022531 Iustin Pop
        self.target_node = instance.primary_node
7216 942be002 Michael Hanselmann
        self.other_node = secondary_node
7217 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7218 942be002 Michael Hanselmann
      elif faulty_secondary:
7219 942be002 Michael Hanselmann
        self.disks = faulty_secondary
7220 942be002 Michael Hanselmann
        self.target_node = secondary_node
7221 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7222 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7223 942be002 Michael Hanselmann
      else:
7224 942be002 Michael Hanselmann
        self.disks = []
7225 942be002 Michael Hanselmann
        check_nodes = []
7226 942be002 Michael Hanselmann
7227 942be002 Michael Hanselmann
    else:
7228 942be002 Michael Hanselmann
      # Non-automatic modes
7229 942be002 Michael Hanselmann
      if self.mode == constants.REPLACE_DISK_PRI:
7230 e9022531 Iustin Pop
        self.target_node = instance.primary_node
7231 942be002 Michael Hanselmann
        self.other_node = secondary_node
7232 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7233 7e9366f7 Iustin Pop
7234 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_SEC:
7235 942be002 Michael Hanselmann
        self.target_node = secondary_node
7236 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7237 942be002 Michael Hanselmann
        check_nodes = [self.target_node, self.other_node]
7238 a9e0c397 Iustin Pop
7239 942be002 Michael Hanselmann
      elif self.mode == constants.REPLACE_DISK_CHG:
7240 942be002 Michael Hanselmann
        self.new_node = remote_node
7241 e9022531 Iustin Pop
        self.other_node = instance.primary_node
7242 942be002 Michael Hanselmann
        self.target_node = secondary_node
7243 942be002 Michael Hanselmann
        check_nodes = [self.new_node, self.other_node]
7244 54155f52 Iustin Pop
7245 942be002 Michael Hanselmann
        _CheckNodeNotDrained(self.lu, remote_node)
7246 a8083063 Iustin Pop
7247 9af0fa6a Iustin Pop
        old_node_info = self.cfg.GetNodeInfo(secondary_node)
7248 9af0fa6a Iustin Pop
        assert old_node_info is not None
7249 9af0fa6a Iustin Pop
        if old_node_info.offline and not self.early_release:
7250 9af0fa6a Iustin Pop
          # doesn't make sense to delay the release
7251 9af0fa6a Iustin Pop
          self.early_release = True
7252 9af0fa6a Iustin Pop
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
7253 9af0fa6a Iustin Pop
                          " early-release mode", secondary_node)
7254 9af0fa6a Iustin Pop
7255 942be002 Michael Hanselmann
      else:
7256 942be002 Michael Hanselmann
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
7257 942be002 Michael Hanselmann
                                     self.mode)
7258 942be002 Michael Hanselmann
7259 942be002 Michael Hanselmann
      # If not specified all disks should be replaced
7260 942be002 Michael Hanselmann
      if not self.disks:
7261 942be002 Michael Hanselmann
        self.disks = range(len(self.instance.disks))
7262 a9e0c397 Iustin Pop
7263 2bb5c911 Michael Hanselmann
    for node in check_nodes:
7264 2bb5c911 Michael Hanselmann
      _CheckNodeOnline(self.lu, node)
7265 e4376078 Iustin Pop
7266 2bb5c911 Michael Hanselmann
    # Check whether disks are valid
7267 2bb5c911 Michael Hanselmann
    for disk_idx in self.disks:
7268 e9022531 Iustin Pop
      instance.FindDisk(disk_idx)
7269 e4376078 Iustin Pop
7270 2bb5c911 Michael Hanselmann
    # Get secondary node IP addresses
7271 2bb5c911 Michael Hanselmann
    node_2nd_ip = {}
7272 e4376078 Iustin Pop
7273 2bb5c911 Michael Hanselmann
    for node_name in [self.target_node, self.other_node, self.new_node]:
7274 2bb5c911 Michael Hanselmann
      if node_name is not None:
7275 2bb5c911 Michael Hanselmann
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
7276 e4376078 Iustin Pop
7277 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = node_2nd_ip
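To summarize the branches above: each replacement mode fixes which node receives the new storage (target_node), which node is only health-checked (other_node), and whether a brand new secondary is involved; the automatic mode picks one of the first two layouts depending on where faulty disks are found. A compact illustration, with pri/sec standing for the instance's current primary and secondary node (the keys are descriptive labels, not the actual constants.REPLACE_DISK_* values):

ROLE_MAP = {
  # label -> (target_node, other_node, new_node)
  "PRI": ("pri", "sec", None),             # replace disks on the primary
  "SEC": ("sec", "pri", None),             # replace disks on the secondary
  "CHG": ("sec", "pri", "new secondary"),  # change the secondary node
}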
7278 a9e0c397 Iustin Pop
7279 c68174b6 Michael Hanselmann
  def Exec(self, feedback_fn):
7280 2bb5c911 Michael Hanselmann
    """Execute disk replacement.
7281 2bb5c911 Michael Hanselmann

7282 2bb5c911 Michael Hanselmann
    This dispatches the disk replacement to the appropriate handler.
7283 cff90b79 Iustin Pop

7284 a9e0c397 Iustin Pop
    """
7285 94a1b377 Michael Hanselmann
    if self.delay_iallocator:
7286 94a1b377 Michael Hanselmann
      self._CheckPrereq2()
7287 94a1b377 Michael Hanselmann
7288 942be002 Michael Hanselmann
    if not self.disks:
7289 942be002 Michael Hanselmann
      feedback_fn("No disks need replacement")
7290 942be002 Michael Hanselmann
      return
7291 942be002 Michael Hanselmann
7292 942be002 Michael Hanselmann
    feedback_fn("Replacing disk(s) %s for %s" %
7293 1f864b60 Iustin Pop
                (utils.CommaJoin(self.disks), self.instance.name))
7294 7ffc5a86 Michael Hanselmann
7295 2bb5c911 Michael Hanselmann
    activate_disks = (not self.instance.admin_up)
7296 2bb5c911 Michael Hanselmann
7297 2bb5c911 Michael Hanselmann
    # Activate the instance disks if we're replacing them on a down instance
7298 2bb5c911 Michael Hanselmann
    if activate_disks:
7299 2bb5c911 Michael Hanselmann
      _StartInstanceDisks(self.lu, self.instance, True)
7300 2bb5c911 Michael Hanselmann
7301 2bb5c911 Michael Hanselmann
    try:
7302 942be002 Michael Hanselmann
      # Should we replace the secondary node?
7303 942be002 Michael Hanselmann
      if self.new_node is not None:
7304 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8Secondary
7305 2bb5c911 Michael Hanselmann
      else:
7306 a4eae71f Michael Hanselmann
        fn = self._ExecDrbd8DiskOnly
7307 a4eae71f Michael Hanselmann
7308 a4eae71f Michael Hanselmann
      return fn(feedback_fn)
7309 2bb5c911 Michael Hanselmann
7310 2bb5c911 Michael Hanselmann
    finally:
7311 5c983ee5 Iustin Pop
      # Deactivate the instance disks if we're replacing them on a
7312 5c983ee5 Iustin Pop
      # down instance
7313 2bb5c911 Michael Hanselmann
      if activate_disks:
7314 2bb5c911 Michael Hanselmann
        _SafeShutdownInstanceDisks(self.lu, self.instance)
7315 2bb5c911 Michael Hanselmann
7316 2bb5c911 Michael Hanselmann
  def _CheckVolumeGroup(self, nodes):
7317 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Checking volume groups")
7318 2bb5c911 Michael Hanselmann
7319 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
7320 cff90b79 Iustin Pop
7321 2bb5c911 Michael Hanselmann
    # Make sure volume group exists on all involved nodes
7322 2bb5c911 Michael Hanselmann
    results = self.rpc.call_vg_list(nodes)
7323 cff90b79 Iustin Pop
    if not results:
7324 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
7325 2bb5c911 Michael Hanselmann
7326 2bb5c911 Michael Hanselmann
    for node in nodes:
7327 781de953 Iustin Pop
      res = results[node]
7328 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
7329 2bb5c911 Michael Hanselmann
      if vgname not in res.payload:
7330 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
7331 2bb5c911 Michael Hanselmann
                                 (vgname, node))
7332 2bb5c911 Michael Hanselmann
7333 2bb5c911 Michael Hanselmann
  def _CheckDisksExistence(self, nodes):
7334 2bb5c911 Michael Hanselmann
    # Check disk existence
7335 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7336 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7337 cff90b79 Iustin Pop
        continue
7338 2bb5c911 Michael Hanselmann
7339 2bb5c911 Michael Hanselmann
      for node in nodes:
7340 2bb5c911 Michael Hanselmann
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
7341 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(dev, node)
7342 2bb5c911 Michael Hanselmann
7343 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
7344 2bb5c911 Michael Hanselmann
7345 4c4e4e1e Iustin Pop
        msg = result.fail_msg
7346 2bb5c911 Michael Hanselmann
        if msg or not result.payload:
7347 2bb5c911 Michael Hanselmann
          if not msg:
7348 2bb5c911 Michael Hanselmann
            msg = "disk not found"
7349 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
7350 23829f6f Iustin Pop
                                   (idx, node, msg))
7351 cff90b79 Iustin Pop
7352 2bb5c911 Michael Hanselmann
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
7353 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7354 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7355 cff90b79 Iustin Pop
        continue
7356 cff90b79 Iustin Pop
7357 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
7358 2bb5c911 Michael Hanselmann
                      (idx, node_name))
7359 2bb5c911 Michael Hanselmann
7360 2bb5c911 Michael Hanselmann
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
7361 2bb5c911 Michael Hanselmann
                                   ldisk=ldisk):
7362 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
7363 2bb5c911 Michael Hanselmann
                                 " replace disks for instance %s" %
7364 2bb5c911 Michael Hanselmann
                                 (node_name, self.instance.name))
7365 2bb5c911 Michael Hanselmann
7366 2bb5c911 Michael Hanselmann
  def _CreateNewStorage(self, node_name):
7367 2bb5c911 Michael Hanselmann
    vgname = self.cfg.GetVGName()
7368 2bb5c911 Michael Hanselmann
    iv_names = {}
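    # iv_names maps each DRBD device's iv_name to a (drbd_dev, old_lvs,
    # new_lvs) tuple; it is consumed later by _CheckDevices and
    # _RemoveOldStorage.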
7369 2bb5c911 Michael Hanselmann
7370 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7371 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
7372 a9e0c397 Iustin Pop
        continue
7373 2bb5c911 Michael Hanselmann
7374 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
7375 2bb5c911 Michael Hanselmann
7376 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
7377 2bb5c911 Michael Hanselmann
7378 2bb5c911 Michael Hanselmann
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
7379 2bb5c911 Michael Hanselmann
      names = _GenerateUniqueNames(self.lu, lv_names)
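      # Every DRBD8 disk is backed by two LVs: a data LV of the disk's size
      # and a small (128 MiB) metadata LV; generate uniquely named
      # replacements for both on the target node.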
7380 2bb5c911 Michael Hanselmann
7381 2bb5c911 Michael Hanselmann
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
7382 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
7383 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
7384 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
7385 2bb5c911 Michael Hanselmann
7386 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
7387 a9e0c397 Iustin Pop
      old_lvs = dev.children
7388 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
7389 2bb5c911 Michael Hanselmann
7390 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
7391 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
7392 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
7393 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
7394 2bb5c911 Michael Hanselmann
7395 2bb5c911 Michael Hanselmann
    return iv_names
7396 2bb5c911 Michael Hanselmann
7397 2bb5c911 Michael Hanselmann
  def _CheckDevices(self, node_name, iv_names):
7398 1122eb25 Iustin Pop
    for name, (dev, _, _) in iv_names.iteritems():
7399 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
7400 2bb5c911 Michael Hanselmann
7401 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_find(node_name, dev)
7402 2bb5c911 Michael Hanselmann
7403 2bb5c911 Michael Hanselmann
      msg = result.fail_msg
7404 2bb5c911 Michael Hanselmann
      if msg or not result.payload:
7405 2bb5c911 Michael Hanselmann
        if not msg:
7406 2bb5c911 Michael Hanselmann
          msg = "disk not found"
7407 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
7408 2bb5c911 Michael Hanselmann
                                 (name, msg))
7409 2bb5c911 Michael Hanselmann
7410 96acbc09 Michael Hanselmann
      if result.payload.is_degraded:
7411 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
7412 2bb5c911 Michael Hanselmann
7413 2bb5c911 Michael Hanselmann
  def _RemoveOldStorage(self, node_name, iv_names):
7414 1122eb25 Iustin Pop
    for name, (_, old_lvs, _) in iv_names.iteritems():
7415 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Remove logical volumes for %s" % name)
7416 2bb5c911 Michael Hanselmann
7417 2bb5c911 Michael Hanselmann
      for lv in old_lvs:
7418 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(lv, node_name)
7419 2bb5c911 Michael Hanselmann
7420 2bb5c911 Michael Hanselmann
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
7421 2bb5c911 Michael Hanselmann
        if msg:
7422 2bb5c911 Michael Hanselmann
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
7423 2bb5c911 Michael Hanselmann
                             hint="remove unused LVs manually")
7424 2bb5c911 Michael Hanselmann
7425 7ea7bcf6 Iustin Pop
  def _ReleaseNodeLock(self, node_name):
7426 7ea7bcf6 Iustin Pop
    """Releases the lock for a given node."""
7427 7ea7bcf6 Iustin Pop
    self.lu.context.glm.release(locking.LEVEL_NODE, node_name)
7428 7ea7bcf6 Iustin Pop
7429 a4eae71f Michael Hanselmann
  def _ExecDrbd8DiskOnly(self, feedback_fn):
7430 2bb5c911 Michael Hanselmann
    """Replace a disk on the primary or secondary for DRBD 8.
7431 2bb5c911 Michael Hanselmann

7432 2bb5c911 Michael Hanselmann
    The algorithm for replace is quite complicated:
7433 2bb5c911 Michael Hanselmann

7434 2bb5c911 Michael Hanselmann
      1. for each disk to be replaced:
7435 2bb5c911 Michael Hanselmann

7436 2bb5c911 Michael Hanselmann
        1. create new LVs on the target node with unique names
7437 2bb5c911 Michael Hanselmann
        1. detach old LVs from the drbd device
7438 2bb5c911 Michael Hanselmann
        1. rename old LVs to name_replaced.<time_t>
7439 2bb5c911 Michael Hanselmann
        1. rename new LVs to old LVs
7440 2bb5c911 Michael Hanselmann
        1. attach the new LVs (with the old names now) to the drbd device
7441 2bb5c911 Michael Hanselmann

7442 2bb5c911 Michael Hanselmann
      1. wait for sync across all devices
7443 2bb5c911 Michael Hanselmann

7444 2bb5c911 Michael Hanselmann
      1. for each modified disk:
7445 2bb5c911 Michael Hanselmann

7446 2bb5c911 Michael Hanselmann
        1. remove old LVs (which have the name name_replaced.<time_t>)
7447 2bb5c911 Michael Hanselmann

7448 2bb5c911 Michael Hanselmann
    Failures are not very well handled.
7449 2bb5c911 Michael Hanselmann

7450 2bb5c911 Michael Hanselmann
    """
7451 2bb5c911 Michael Hanselmann
    steps_total = 6
7452 2bb5c911 Michael Hanselmann
7453 2bb5c911 Michael Hanselmann
    # Step: check device activation
7454 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
7455 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.other_node, self.target_node])
7456 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.target_node, self.other_node])
7457 2bb5c911 Michael Hanselmann
7458 2bb5c911 Michael Hanselmann
    # Step: check other node consistency
7459 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7460 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.other_node,
7461 2bb5c911 Michael Hanselmann
                                self.other_node == self.instance.primary_node,
7462 2bb5c911 Michael Hanselmann
                                False)
7463 2bb5c911 Michael Hanselmann
7464 2bb5c911 Michael Hanselmann
    # Step: create new storage
7465 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7466 2bb5c911 Michael Hanselmann
    iv_names = self._CreateNewStorage(self.target_node)
7467 a9e0c397 Iustin Pop
7468 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
7469 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7470 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
7471 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
7472 2bb5c911 Michael Hanselmann
7473 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
7474 4d4a651d Michael Hanselmann
                                                     old_lvs)
7475 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
7476 2bb5c911 Michael Hanselmann
                   " %s for device %s" % (self.target_node, dev.iv_name))
7477 cff90b79 Iustin Pop
      #dev.children = []
7478 cff90b79 Iustin Pop
      #cfg.Update(instance)
7479 a9e0c397 Iustin Pop
7480 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
7481 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
7482 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
7483 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
7484 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
7485 cff90b79 Iustin Pop
7486 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
7487 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
7488 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
7489 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
7490 2bb5c911 Michael Hanselmann
7491 2bb5c911 Michael Hanselmann
      # Build the rename list based on what LVs exist on the node
7492 2bb5c911 Michael Hanselmann
      rename_old_to_new = []
7493 cff90b79 Iustin Pop
      for to_ren in old_lvs:
7494 2bb5c911 Michael Hanselmann
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
7495 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
7496 23829f6f Iustin Pop
          # device exists
7497 2bb5c911 Michael Hanselmann
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
7498 cff90b79 Iustin Pop
7499 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the old LVs on the target node")
7500 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
7501 4d4a651d Michael Hanselmann
                                             rename_old_to_new)
7502 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
7503 2bb5c911 Michael Hanselmann
7504 2bb5c911 Michael Hanselmann
      # Now we rename the new LVs to the old LVs
7505 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the new LVs on the target node")
7506 2bb5c911 Michael Hanselmann
      rename_new_to_old = [(new, old.physical_id)
7507 2bb5c911 Michael Hanselmann
                           for old, new in zip(old_lvs, new_lvs)]
7508 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node,
7509 4d4a651d Michael Hanselmann
                                             rename_new_to_old)
7510 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
7511 cff90b79 Iustin Pop
7512 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
7513 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
7514 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(new, self.target_node)
7515 a9e0c397 Iustin Pop
7516 cff90b79 Iustin Pop
      for disk in old_lvs:
7517 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
7518 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(disk, self.target_node)
7519 a9e0c397 Iustin Pop
7520 2bb5c911 Michael Hanselmann
      # Now that the new lvs have the old name, we can add them to the device
7521 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
7522 4d4a651d Michael Hanselmann
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
7523 4d4a651d Michael Hanselmann
                                                  new_lvs)
7524 4c4e4e1e Iustin Pop
      msg = result.fail_msg
7525 2cc1da8b Iustin Pop
      if msg:
7526 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
7527 4d4a651d Michael Hanselmann
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
7528 4d4a651d Michael Hanselmann
                                               new_lv).fail_msg
7529 4c4e4e1e Iustin Pop
          if msg2:
7530 2bb5c911 Michael Hanselmann
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
7531 2bb5c911 Michael Hanselmann
                               hint=("cleanup manually the unused logical"
7532 2bb5c911 Michael Hanselmann
                                     "volumes"))
7533 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
7534 a9e0c397 Iustin Pop
7535 a9e0c397 Iustin Pop
      dev.children = new_lvs
7536 a9e0c397 Iustin Pop
7537 a4eae71f Michael Hanselmann
      self.cfg.Update(self.instance, feedback_fn)
7538 a9e0c397 Iustin Pop
7539 7ea7bcf6 Iustin Pop
    cstep = 5
7540 7ea7bcf6 Iustin Pop
    if self.early_release:
7541 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7542 7ea7bcf6 Iustin Pop
      cstep += 1
7543 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7544 d5cd389c Iustin Pop
      # WARNING: we release both node locks here, do not do other RPCs
7545 d5cd389c Iustin Pop
      # than WaitForSync to the primary node
7546 d5cd389c Iustin Pop
      self._ReleaseNodeLock([self.target_node, self.other_node])
7547 7ea7bcf6 Iustin Pop
7548 2bb5c911 Michael Hanselmann
    # Wait for sync
7549 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
7550 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
7551 7ea7bcf6 Iustin Pop
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7552 7ea7bcf6 Iustin Pop
    cstep += 1
7553 b6c07b79 Michael Hanselmann
    _WaitForSync(self.lu, self.instance)
7554 a9e0c397 Iustin Pop
7555 2bb5c911 Michael Hanselmann
    # Check all devices manually
7556 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
7557 a9e0c397 Iustin Pop
7558 cff90b79 Iustin Pop
    # Step: remove old storage
7559 7ea7bcf6 Iustin Pop
    if not self.early_release:
7560 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7561 7ea7bcf6 Iustin Pop
      cstep += 1
7562 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7563 a9e0c397 Iustin Pop
7564 a4eae71f Michael Hanselmann
  def _ExecDrbd8Secondary(self, feedback_fn):
7565 2bb5c911 Michael Hanselmann
    """Replace the secondary node for DRBD 8.
7566 a9e0c397 Iustin Pop

7567 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
7568 a9e0c397 Iustin Pop
      - for all disks of the instance:
7569 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
7570 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
7571 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
7572 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
7573 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
7574 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
7575 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
7576 a9e0c397 Iustin Pop
          not network enabled
7577 a9e0c397 Iustin Pop
      - wait for sync across all devices
7578 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
7579 a9e0c397 Iustin Pop

7580 a9e0c397 Iustin Pop
    Failures are not very well handled.
7581 0834c866 Iustin Pop

7582 a9e0c397 Iustin Pop
    """
7583 0834c866 Iustin Pop
    steps_total = 6
7584 0834c866 Iustin Pop
7585 0834c866 Iustin Pop
    # Step: check device activation
7586 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
7587 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.instance.primary_node])
7588 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.instance.primary_node])
7589 0834c866 Iustin Pop
7590 0834c866 Iustin Pop
    # Step: check other node consistency
7591 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
7592 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
7593 0834c866 Iustin Pop
7594 0834c866 Iustin Pop
    # Step: create new storage
7595 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
7596 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7597 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
7598 2bb5c911 Michael Hanselmann
                      (self.new_node, idx))
7599 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
7600 a9e0c397 Iustin Pop
      for new_lv in dev.children:
7601 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
7602 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
7603 a9e0c397 Iustin Pop
7604 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
7605 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
7606 a1578d63 Iustin Pop
    # error and the success paths
7607 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
7608 4d4a651d Michael Hanselmann
    minors = self.cfg.AllocateDRBDMinor([self.new_node
7609 4d4a651d Michael Hanselmann
                                         for dev in self.instance.disks],
7610 2bb5c911 Michael Hanselmann
                                        self.instance.name)
7611 099c52ad Iustin Pop
    logging.debug("Allocated minors %r", minors)
7612 2bb5c911 Michael Hanselmann
7613 2bb5c911 Michael Hanselmann
    iv_names = {}
7614 2bb5c911 Michael Hanselmann
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
7615 4d4a651d Michael Hanselmann
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
7616 4d4a651d Michael Hanselmann
                      (self.new_node, idx))
7617 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
7618 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
7619 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
7620 a2d59d8b Iustin Pop
      # with network, for the latter activation in step 4
7621 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
7622 2bb5c911 Michael Hanselmann
      if self.instance.primary_node == o_node1:
7623 a2d59d8b Iustin Pop
        p_minor = o_minor1
7624 ffa1c0dc Iustin Pop
      else:
7625 1122eb25 Iustin Pop
        assert self.instance.primary_node == o_node2, "Three-node instance?"
7626 a2d59d8b Iustin Pop
        p_minor = o_minor2
7627 a2d59d8b Iustin Pop
7628 4d4a651d Michael Hanselmann
      new_alone_id = (self.instance.primary_node, self.new_node, None,
7629 4d4a651d Michael Hanselmann
                      p_minor, new_minor, o_secret)
7630 4d4a651d Michael Hanselmann
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
7631 4d4a651d Michael Hanselmann
                    p_minor, new_minor, o_secret)
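      # Both tuples follow the DRBD8 logical_id layout
      # (node_a, node_b, port, minor_a, minor_b, secret); new_alone_id leaves
      # the port as None so the device is brought up without networking first.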
7632 a2d59d8b Iustin Pop
7633 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
7634 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
7635 a2d59d8b Iustin Pop
                    new_net_id)
7636 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
7637 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
7638 8a6c7011 Iustin Pop
                              children=dev.children,
7639 8a6c7011 Iustin Pop
                              size=dev.size)
7640 796cab27 Iustin Pop
      try:
7641 2bb5c911 Michael Hanselmann
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
7642 2bb5c911 Michael Hanselmann
                              _GetInstanceInfoText(self.instance), False)
7643 82759cb1 Iustin Pop
      except errors.GenericError:
7644 2bb5c911 Michael Hanselmann
        self.cfg.ReleaseDRBDMinors(self.instance.name)
7645 796cab27 Iustin Pop
        raise
7646 a9e0c397 Iustin Pop
7647 2bb5c911 Michael Hanselmann
    # We have new devices, shutdown the drbd on the old secondary
7648 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
7649 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
7650 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.target_node)
7651 2bb5c911 Michael Hanselmann
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
7652 cacfd1fd Iustin Pop
      if msg:
7653 2bb5c911 Michael Hanselmann
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
7654 2bb5c911 Michael Hanselmann
                           "node: %s" % (idx, msg),
7655 2bb5c911 Michael Hanselmann
                           hint=("Please cleanup this device manually as"
7656 2bb5c911 Michael Hanselmann
                                 " soon as possible"))
7657 a9e0c397 Iustin Pop
7658 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
7659 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
7660 4d4a651d Michael Hanselmann
                                               self.node_secondary_ip,
7661 4d4a651d Michael Hanselmann
                                               self.instance.disks)\
7662 4d4a651d Michael Hanselmann
                                              [self.instance.primary_node]
7663 642445d9 Iustin Pop
7664 4c4e4e1e Iustin Pop
    msg = result.fail_msg
7665 a2d59d8b Iustin Pop
    if msg:
7666 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
7667 2bb5c911 Michael Hanselmann
      self.cfg.ReleaseDRBDMinors(self.instance.name)
7668 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
7669 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
7670 642445d9 Iustin Pop
7671 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
7672 642445d9 Iustin Pop
    # the instance to point to the new secondary
7673 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Updating instance configuration")
7674 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
7675 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
7676 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.instance.primary_node)
7677 2bb5c911 Michael Hanselmann
7678 a4eae71f Michael Hanselmann
    self.cfg.Update(self.instance, feedback_fn)
7679 a9e0c397 Iustin Pop
7680 642445d9 Iustin Pop
    # and now perform the drbd attach
7681 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Attaching primary drbds to new secondary"
7682 2bb5c911 Michael Hanselmann
                    " (standalone => connected)")
7683 4d4a651d Michael Hanselmann
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
7684 4d4a651d Michael Hanselmann
                                            self.new_node],
7685 4d4a651d Michael Hanselmann
                                           self.node_secondary_ip,
7686 4d4a651d Michael Hanselmann
                                           self.instance.disks,
7687 4d4a651d Michael Hanselmann
                                           self.instance.name,
7688 a2d59d8b Iustin Pop
                                           False)
7689 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
7690 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
7691 a2d59d8b Iustin Pop
      if msg:
7692 4d4a651d Michael Hanselmann
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
7693 4d4a651d Michael Hanselmann
                           to_node, msg,
7694 2bb5c911 Michael Hanselmann
                           hint=("please do a gnt-instance info to see the"
7695 2bb5c911 Michael Hanselmann
                                 " status of disks"))
7696 7ea7bcf6 Iustin Pop
    cstep = 5
7697 7ea7bcf6 Iustin Pop
    if self.early_release:
7698 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7699 7ea7bcf6 Iustin Pop
      cstep += 1
7700 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7701 d5cd389c Iustin Pop
      # WARNING: we release all node locks here, do not do other RPCs
7702 d5cd389c Iustin Pop
      # than WaitForSync to the primary node
7703 d5cd389c Iustin Pop
      self._ReleaseNodeLock([self.instance.primary_node,
7704 d5cd389c Iustin Pop
                             self.target_node,
7705 d5cd389c Iustin Pop
                             self.new_node])
7706 a9e0c397 Iustin Pop
7707 2bb5c911 Michael Hanselmann
    # Wait for sync
7708 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
7709 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
7710 7ea7bcf6 Iustin Pop
    self.lu.LogStep(cstep, steps_total, "Sync devices")
7711 7ea7bcf6 Iustin Pop
    cstep += 1
7712 b6c07b79 Michael Hanselmann
    _WaitForSync(self.lu, self.instance)
7713 a9e0c397 Iustin Pop
7714 2bb5c911 Michael Hanselmann
    # Check all devices manually
7715 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
7716 22985314 Guido Trotter
7717 2bb5c911 Michael Hanselmann
    # Step: remove old storage
7718 7ea7bcf6 Iustin Pop
    if not self.early_release:
7719 7ea7bcf6 Iustin Pop
      self.lu.LogStep(cstep, steps_total, "Removing old storage")
7720 7ea7bcf6 Iustin Pop
      self._RemoveOldStorage(self.target_node, iv_names)
7721 a9e0c397 Iustin Pop
7722 a8083063 Iustin Pop
7723 76aef8fc Michael Hanselmann
class LURepairNodeStorage(NoHooksLU):
7724 76aef8fc Michael Hanselmann
  """Repairs the volume group on a node.
7725 76aef8fc Michael Hanselmann

7726 76aef8fc Michael Hanselmann
  """
7727 76aef8fc Michael Hanselmann
  _OP_REQP = ["node_name"]
7728 76aef8fc Michael Hanselmann
  REQ_BGL = False
7729 76aef8fc Michael Hanselmann
7730 76aef8fc Michael Hanselmann
  def CheckArguments(self):
7731 cf26a87a Iustin Pop
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
7732 76aef8fc Michael Hanselmann
7733 0e3baaf3 Iustin Pop
    _CheckStorageType(self.op.storage_type)
7734 0e3baaf3 Iustin Pop
7735 76aef8fc Michael Hanselmann
  def ExpandNames(self):
7736 76aef8fc Michael Hanselmann
    self.needed_locks = {
7737 76aef8fc Michael Hanselmann
      locking.LEVEL_NODE: [self.op.node_name],
7738 76aef8fc Michael Hanselmann
      }
7739 76aef8fc Michael Hanselmann
7740 76aef8fc Michael Hanselmann
  def _CheckFaultyDisks(self, instance, node_name):
7741 7e9c6a78 Iustin Pop
    """Ensure faulty disks abort the opcode or at least warn."""
7742 7e9c6a78 Iustin Pop
    try:
7743 7e9c6a78 Iustin Pop
      if _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
7744 7e9c6a78 Iustin Pop
                                  node_name, True):
7745 7e9c6a78 Iustin Pop
        raise errors.OpPrereqError("Instance '%s' has faulty disks on"
7746 7e9c6a78 Iustin Pop
                                   " node '%s'" % (instance.name, node_name),
7747 7e9c6a78 Iustin Pop
                                   errors.ECODE_STATE)
7748 7e9c6a78 Iustin Pop
    except errors.OpPrereqError, err:
7749 7e9c6a78 Iustin Pop
      if self.op.ignore_consistency:
7750 7e9c6a78 Iustin Pop
        self.proc.LogWarning(str(err.args[0]))
7751 7e9c6a78 Iustin Pop
      else:
7752 7e9c6a78 Iustin Pop
        raise
7753 76aef8fc Michael Hanselmann
7754 76aef8fc Michael Hanselmann
  def CheckPrereq(self):
7755 76aef8fc Michael Hanselmann
    """Check prerequisites.
7756 76aef8fc Michael Hanselmann

7757 76aef8fc Michael Hanselmann
    """
7758 76aef8fc Michael Hanselmann
    storage_type = self.op.storage_type
7759 76aef8fc Michael Hanselmann
7760 76aef8fc Michael Hanselmann
    if (constants.SO_FIX_CONSISTENCY not in
7761 76aef8fc Michael Hanselmann
        constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])):
7762 76aef8fc Michael Hanselmann
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
7763 5c983ee5 Iustin Pop
                                 " repaired" % storage_type,
7764 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7765 76aef8fc Michael Hanselmann
7766 76aef8fc Michael Hanselmann
    # Check whether any instance on this node has faulty disks
7767 76aef8fc Michael Hanselmann
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
7768 7e9c6a78 Iustin Pop
      if not inst.admin_up:
7769 7e9c6a78 Iustin Pop
        continue
7770 76aef8fc Michael Hanselmann
      check_nodes = set(inst.all_nodes)
7771 76aef8fc Michael Hanselmann
      check_nodes.discard(self.op.node_name)
7772 76aef8fc Michael Hanselmann
      for inst_node_name in check_nodes:
7773 76aef8fc Michael Hanselmann
        self._CheckFaultyDisks(inst, inst_node_name)
7774 76aef8fc Michael Hanselmann
7775 76aef8fc Michael Hanselmann
  def Exec(self, feedback_fn):
7776 76aef8fc Michael Hanselmann
    feedback_fn("Repairing storage unit '%s' on %s ..." %
7777 76aef8fc Michael Hanselmann
                (self.op.name, self.op.node_name))
7778 76aef8fc Michael Hanselmann
7779 76aef8fc Michael Hanselmann
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
7780 76aef8fc Michael Hanselmann
    result = self.rpc.call_storage_execute(self.op.node_name,
7781 76aef8fc Michael Hanselmann
                                           self.op.storage_type, st_args,
7782 76aef8fc Michael Hanselmann
                                           self.op.name,
7783 76aef8fc Michael Hanselmann
                                           constants.SO_FIX_CONSISTENCY)
7784 76aef8fc Michael Hanselmann
    result.Raise("Failed to repair storage unit '%s' on %s" %
7785 76aef8fc Michael Hanselmann
                 (self.op.name, self.op.node_name))
7786 76aef8fc Michael Hanselmann
7787 76aef8fc Michael Hanselmann
7788 f7e7689f Iustin Pop
class LUNodeEvacuationStrategy(NoHooksLU):
7789 f7e7689f Iustin Pop
  """Computes the node evacuation strategy.
7790 f7e7689f Iustin Pop

7791 f7e7689f Iustin Pop
  """
7792 f7e7689f Iustin Pop
  _OP_REQP = ["nodes"]
7793 f7e7689f Iustin Pop
  REQ_BGL = False
7794 f7e7689f Iustin Pop
7795 f7e7689f Iustin Pop
  def CheckArguments(self):
7796 f7e7689f Iustin Pop
    if not hasattr(self.op, "remote_node"):
7797 f7e7689f Iustin Pop
      self.op.remote_node = None
7798 f7e7689f Iustin Pop
    if not hasattr(self.op, "iallocator"):
7799 f7e7689f Iustin Pop
      self.op.iallocator = None
7800 f7e7689f Iustin Pop
    if self.op.remote_node is not None and self.op.iallocator is not None:
7801 f7e7689f Iustin Pop
      raise errors.OpPrereqError("Give either the iallocator or the new"
7802 f7e7689f Iustin Pop
                                 " secondary, not both", errors.ECODE_INVAL)
7803 f7e7689f Iustin Pop
7804 f7e7689f Iustin Pop
  def ExpandNames(self):
7805 f7e7689f Iustin Pop
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)
7806 f7e7689f Iustin Pop
    self.needed_locks = locks = {}
7807 f7e7689f Iustin Pop
    if self.op.remote_node is None:
7808 f7e7689f Iustin Pop
      locks[locking.LEVEL_NODE] = locking.ALL_SET
7809 f7e7689f Iustin Pop
    else:
7810 f7e7689f Iustin Pop
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
7811 f7e7689f Iustin Pop
      locks[locking.LEVEL_NODE] = self.op.nodes + [self.op.remote_node]
7812 f7e7689f Iustin Pop
7813 f7e7689f Iustin Pop
  def CheckPrereq(self):
7814 f7e7689f Iustin Pop
    pass
7815 f7e7689f Iustin Pop
7816 f7e7689f Iustin Pop
  def Exec(self, feedback_fn):
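    # With an explicit remote node the result is built here as a list of
    # [instance_name, new_secondary_node] pairs; otherwise it is whatever the
    # iallocator returns for the MEVAC request.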
7817 f7e7689f Iustin Pop
    if self.op.remote_node is not None:
7818 f7e7689f Iustin Pop
      instances = []
7819 f7e7689f Iustin Pop
      for node in self.op.nodes:
7820 f7e7689f Iustin Pop
        instances.extend(_GetNodeSecondaryInstances(self.cfg, node))
7821 f7e7689f Iustin Pop
      result = []
7822 f7e7689f Iustin Pop
      for i in instances:
7823 f7e7689f Iustin Pop
        if i.primary_node == self.op.remote_node:
7824 f7e7689f Iustin Pop
          raise errors.OpPrereqError("Node %s is the primary node of"
7825 f7e7689f Iustin Pop
                                     " instance %s, cannot use it as"
7826 f7e7689f Iustin Pop
                                     " secondary" %
7827 f7e7689f Iustin Pop
                                     (self.op.remote_node, i.name),
7828 f7e7689f Iustin Pop
                                     errors.ECODE_INVAL)
7829 f7e7689f Iustin Pop
        result.append([i.name, self.op.remote_node])
7830 f7e7689f Iustin Pop
    else:
7831 f7e7689f Iustin Pop
      ial = IAllocator(self.cfg, self.rpc,
7832 f7e7689f Iustin Pop
                       mode=constants.IALLOCATOR_MODE_MEVAC,
7833 f7e7689f Iustin Pop
                       evac_nodes=self.op.nodes)
7834 f7e7689f Iustin Pop
      ial.Run(self.op.iallocator, validate=True)
7835 f7e7689f Iustin Pop
      if not ial.success:
7836 f7e7689f Iustin Pop
        raise errors.OpExecError("No valid evacuation solution: %s" % ial.info,
7837 f7e7689f Iustin Pop
                                 errors.ECODE_NORES)
7838 f7e7689f Iustin Pop
      result = ial.result
7839 f7e7689f Iustin Pop
    return result
7840 f7e7689f Iustin Pop
7841 f7e7689f Iustin Pop
7842 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
7843 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
7844 8729e0d7 Iustin Pop

7845 8729e0d7 Iustin Pop
  """
7846 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
7847 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
7848 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
7849 31e63dbf Guido Trotter
  REQ_BGL = False
7850 31e63dbf Guido Trotter
7851 31e63dbf Guido Trotter
  def ExpandNames(self):
7852 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
7853 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7854 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7855 31e63dbf Guido Trotter
7856 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
7857 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
7858 31e63dbf Guido Trotter
      self._LockInstancesNodes()
7859 8729e0d7 Iustin Pop
7860 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
7861 8729e0d7 Iustin Pop
    """Build hooks env.
7862 8729e0d7 Iustin Pop

7863 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
7864 8729e0d7 Iustin Pop

7865 8729e0d7 Iustin Pop
    """
7866 8729e0d7 Iustin Pop
    env = {
7867 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
7868 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
7869 8729e0d7 Iustin Pop
      }
7870 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
7871 abd8e836 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
7872 8729e0d7 Iustin Pop
    return env, nl, nl
7873 8729e0d7 Iustin Pop
7874 8729e0d7 Iustin Pop
  def CheckPrereq(self):
7875 8729e0d7 Iustin Pop
    """Check prerequisites.
7876 8729e0d7 Iustin Pop

7877 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
7878 8729e0d7 Iustin Pop

7879 8729e0d7 Iustin Pop
    """
7880 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
7881 31e63dbf Guido Trotter
    assert instance is not None, \
7882 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
7883 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
7884 6b12959c Iustin Pop
    for node in nodenames:
7885 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
7886 7527a8a4 Iustin Pop
7887 31e63dbf Guido Trotter
7888 8729e0d7 Iustin Pop
    self.instance = instance
7889 8729e0d7 Iustin Pop
7890 728489a3 Guido Trotter
    if instance.disk_template not in constants.DTS_GROWABLE:
7891 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
7892 5c983ee5 Iustin Pop
                                 " growing.", errors.ECODE_INVAL)
7893 8729e0d7 Iustin Pop
7894 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
7895 8729e0d7 Iustin Pop
7896 2c42c5df Guido Trotter
    if instance.disk_template != constants.DT_FILE:
7897 2c42c5df Guido Trotter
      # TODO: check the free disk space for file, when that feature will be
7898 2c42c5df Guido Trotter
      # supported
7899 2c42c5df Guido Trotter
      _CheckNodesFreeDisk(self, nodenames, self.op.amount)
7900 8729e0d7 Iustin Pop
7901 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
7902 8729e0d7 Iustin Pop
    """Execute disk grow.
7903 8729e0d7 Iustin Pop

7904 8729e0d7 Iustin Pop
    """
7905 8729e0d7 Iustin Pop
    instance = self.instance
7906 ad24e046 Iustin Pop
    disk = self.disk
7907 6b12959c Iustin Pop
    for node in instance.all_nodes:
7908 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
7909 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
7910 4c4e4e1e Iustin Pop
      result.Raise("Grow request failed to node %s" % node)
7911 5bc556dd Michael Hanselmann
7912 5bc556dd Michael Hanselmann
      # TODO: Rewrite code to work properly
7913 5bc556dd Michael Hanselmann
      # DRBD goes into sync mode for a short amount of time after executing the
7914 5bc556dd Michael Hanselmann
      # "resize" command. DRBD 8.x below version 8.0.13 contains a bug whereby
7915 5bc556dd Michael Hanselmann
      # calling "resize" in sync mode fails. Sleeping for a short amount of
7916 5bc556dd Michael Hanselmann
      # time is a work-around.
7917 5bc556dd Michael Hanselmann
      time.sleep(5)
7918 5bc556dd Michael Hanselmann
7919 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
7920 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
7921 6605411d Iustin Pop
    if self.op.wait_for_sync:
7922 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
7923 6605411d Iustin Pop
      if disk_abort:
7924 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
7925 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
7926 8729e0d7 Iustin Pop
7927 8729e0d7 Iustin Pop
7928 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
7929 a8083063 Iustin Pop
  """Query runtime instance data.
7930 a8083063 Iustin Pop

7931 a8083063 Iustin Pop
  """
7932 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
7933 a987fa48 Guido Trotter
  REQ_BGL = False
7934 ae5849b5 Michael Hanselmann
7935 a987fa48 Guido Trotter
  def ExpandNames(self):
7936 a987fa48 Guido Trotter
    self.needed_locks = {}
7937 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
7938 a987fa48 Guido Trotter
7939 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
7940 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'",
7941 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
7942 a987fa48 Guido Trotter
7943 a987fa48 Guido Trotter
    if self.op.instances:
7944 a987fa48 Guido Trotter
      self.wanted_names = []
7945 a987fa48 Guido Trotter
      for name in self.op.instances:
7946 cf26a87a Iustin Pop
        full_name = _ExpandInstanceName(self.cfg, name)
7947 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
7948 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
7949 a987fa48 Guido Trotter
    else:
7950 a987fa48 Guido Trotter
      self.wanted_names = None
7951 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
7952 a987fa48 Guido Trotter
7953 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
7954 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
7955 a987fa48 Guido Trotter
7956 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
7957 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
7958 a987fa48 Guido Trotter
      self._LockInstancesNodes()
7959 a8083063 Iustin Pop
7960 a8083063 Iustin Pop
  def CheckPrereq(self):
7961 a8083063 Iustin Pop
    """Check prerequisites.
7962 a8083063 Iustin Pop

7963 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
7964 a8083063 Iustin Pop

7965 a8083063 Iustin Pop
    """
7966 a987fa48 Guido Trotter
    if self.wanted_names is None:
7967 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
7968 a8083063 Iustin Pop
7969 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
7970 a987fa48 Guido Trotter
                             in self.wanted_names]
7971 a987fa48 Guido Trotter
    return
7972 a8083063 Iustin Pop
7973 98825740 Michael Hanselmann
  def _ComputeBlockdevStatus(self, node, instance_name, dev):
7974 98825740 Michael Hanselmann
    """Returns the status of a block device
7975 98825740 Michael Hanselmann

7976 98825740 Michael Hanselmann
    """
7977 4dce1a83 Michael Hanselmann
    if self.op.static or not node:
7978 98825740 Michael Hanselmann
      return None
7979 98825740 Michael Hanselmann
7980 98825740 Michael Hanselmann
    self.cfg.SetDiskID(dev, node)
7981 98825740 Michael Hanselmann
7982 98825740 Michael Hanselmann
    result = self.rpc.call_blockdev_find(node, dev)
7983 98825740 Michael Hanselmann
    if result.offline:
7984 98825740 Michael Hanselmann
      return None
7985 98825740 Michael Hanselmann
7986 98825740 Michael Hanselmann
    result.Raise("Can't compute disk status for %s" % instance_name)
7987 98825740 Michael Hanselmann
7988 98825740 Michael Hanselmann
    status = result.payload
7989 ddfe2228 Michael Hanselmann
    if status is None:
7990 ddfe2228 Michael Hanselmann
      return None
7991 98825740 Michael Hanselmann
7992 98825740 Michael Hanselmann
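    # Flatten the payload into the status tuple consumed as pstatus/sstatus by
    # _ComputeDiskStatus: (dev_path, major, minor, sync_percent,
    # estimated_time, is_degraded, ldisk_status).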
    return (status.dev_path, status.major, status.minor,
7993 98825740 Michael Hanselmann
            status.sync_percent, status.estimated_time,
7994 f208978a Michael Hanselmann
            status.is_degraded, status.ldisk_status)
7995 98825740 Michael Hanselmann
7996 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
7997 a8083063 Iustin Pop
    """Compute block device status.
7998 a8083063 Iustin Pop

7999 a8083063 Iustin Pop
    """
8000 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
8001 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
8002 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
8003 a8083063 Iustin Pop
        snode = dev.logical_id[1]
8004 a8083063 Iustin Pop
      else:
8005 a8083063 Iustin Pop
        snode = dev.logical_id[0]
8006 a8083063 Iustin Pop
8007 98825740 Michael Hanselmann
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
8008 98825740 Michael Hanselmann
                                              instance.name, dev)
8009 98825740 Michael Hanselmann
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
8010 a8083063 Iustin Pop
8011 a8083063 Iustin Pop
    if dev.children:
8012 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
8013 a8083063 Iustin Pop
                      for child in dev.children]
8014 a8083063 Iustin Pop
    else:
8015 a8083063 Iustin Pop
      dev_children = []
8016 a8083063 Iustin Pop
8017 a8083063 Iustin Pop
    data = {
8018 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
8019 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
8020 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
8021 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
8022 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
8023 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
8024 a8083063 Iustin Pop
      "children": dev_children,
8025 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
8026 c98162a7 Iustin Pop
      "size": dev.size,
8027 a8083063 Iustin Pop
      }
8028 a8083063 Iustin Pop
8029 a8083063 Iustin Pop
    return data
8030 a8083063 Iustin Pop
8031 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8032 a8083063 Iustin Pop
    """Gather and return data"""
8033 a8083063 Iustin Pop
    result = {}
8034 338e51e8 Iustin Pop
8035 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
8036 338e51e8 Iustin Pop
8037 a8083063 Iustin Pop
    for instance in self.wanted_instances:
8038 57821cac Iustin Pop
      if not self.op.static:
8039 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
8040 57821cac Iustin Pop
                                                  instance.name,
8041 57821cac Iustin Pop
                                                  instance.hypervisor)
8042 4c4e4e1e Iustin Pop
        remote_info.Raise("Error checking node %s" % instance.primary_node)
8043 7ad1af4a Iustin Pop
        remote_info = remote_info.payload
8044 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
8045 57821cac Iustin Pop
          remote_state = "up"
8046 57821cac Iustin Pop
        else:
8047 57821cac Iustin Pop
          remote_state = "down"
8048 a8083063 Iustin Pop
      else:
8049 57821cac Iustin Pop
        remote_state = None
8050 0d68c45d Iustin Pop
      if instance.admin_up:
8051 a8083063 Iustin Pop
        config_state = "up"
8052 0d68c45d Iustin Pop
      else:
8053 0d68c45d Iustin Pop
        config_state = "down"
8054 a8083063 Iustin Pop
8055 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
8056 a8083063 Iustin Pop
               for device in instance.disks]
8057 a8083063 Iustin Pop
8058 a8083063 Iustin Pop
      idict = {
8059 a8083063 Iustin Pop
        "name": instance.name,
8060 a8083063 Iustin Pop
        "config_state": config_state,
8061 a8083063 Iustin Pop
        "run_state": remote_state,
8062 a8083063 Iustin Pop
        "pnode": instance.primary_node,
8063 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
8064 a8083063 Iustin Pop
        "os": instance.os,
8065 0b13832c Guido Trotter
        # this happens to be the same format used for hooks
8066 0b13832c Guido Trotter
        "nics": _NICListToTuple(self, instance.nics),
8067 a8083063 Iustin Pop
        "disks": disks,
8068 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
8069 24838135 Iustin Pop
        "network_port": instance.network_port,
8070 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
8071 7736a5f2 Iustin Pop
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
8072 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
8073 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
8074 90f72445 Iustin Pop
        "serial_no": instance.serial_no,
8075 90f72445 Iustin Pop
        "mtime": instance.mtime,
8076 90f72445 Iustin Pop
        "ctime": instance.ctime,
8077 033d58b0 Iustin Pop
        "uuid": instance.uuid,
8078 a8083063 Iustin Pop
        }
8079 a8083063 Iustin Pop
8080 a8083063 Iustin Pop
      result[instance.name] = idict
8081 a8083063 Iustin Pop
8082 a8083063 Iustin Pop
    return result
8083 a8083063 Iustin Pop
8084 a8083063 Iustin Pop
8085 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
8086 a8083063 Iustin Pop
  """Modifies an instances's parameters.
8087 a8083063 Iustin Pop

8088 a8083063 Iustin Pop
  """
8089 a8083063 Iustin Pop
  HPATH = "instance-modify"
8090 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
8091 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
8092 1a5c7281 Guido Trotter
  REQ_BGL = False
8093 1a5c7281 Guido Trotter
8094 24991749 Iustin Pop
  def CheckArguments(self):
8095 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
8096 24991749 Iustin Pop
      self.op.nics = []
8097 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
8098 24991749 Iustin Pop
      self.op.disks = []
8099 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
8100 24991749 Iustin Pop
      self.op.beparams = {}
8101 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
8102 24991749 Iustin Pop
      self.op.hvparams = {}
8103 e29e9550 Iustin Pop
    if not hasattr(self.op, "disk_template"):
8104 e29e9550 Iustin Pop
      self.op.disk_template = None
8105 e29e9550 Iustin Pop
    if not hasattr(self.op, "remote_node"):
8106 e29e9550 Iustin Pop
      self.op.remote_node = None
8107 96b39bcc Iustin Pop
    if not hasattr(self.op, "os_name"):
8108 96b39bcc Iustin Pop
      self.op.os_name = None
8109 96b39bcc Iustin Pop
    if not hasattr(self.op, "force_variant"):
8110 96b39bcc Iustin Pop
      self.op.force_variant = False
8111 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
8112 e29e9550 Iustin Pop
    if not (self.op.nics or self.op.disks or self.op.disk_template or
8113 96b39bcc Iustin Pop
            self.op.hvparams or self.op.beparams or self.op.os_name):
8114 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
8115 24991749 Iustin Pop
8116 7736a5f2 Iustin Pop
    if self.op.hvparams:
8117 7736a5f2 Iustin Pop
      _CheckGlobalHvParams(self.op.hvparams)
8118 7736a5f2 Iustin Pop
8119 24991749 Iustin Pop
    # Disk validation
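    # self.op.disks is a list of (op, params) pairs: op is constants.DDM_ADD,
    # constants.DDM_REMOVE or the index of an existing disk, and params is a
    # dict of settings, e.g. the hypothetical
    # (constants.DDM_ADD, {'size': 1024, 'mode': constants.DISK_RDWR}).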
8120 24991749 Iustin Pop
    disk_addremove = 0
8121 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
8122 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8123 24991749 Iustin Pop
        disk_addremove += 1
8124 24991749 Iustin Pop
        continue
8125 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
8126 24991749 Iustin Pop
        disk_addremove += 1
8127 24991749 Iustin Pop
      else:
8128 24991749 Iustin Pop
        if not isinstance(disk_op, int):
8129 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index", errors.ECODE_INVAL)
8130 8b46606c Guido Trotter
        if not isinstance(disk_dict, dict):
8131 8b46606c Guido Trotter
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
8132 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
8133 8b46606c Guido Trotter
8134 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
8135 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
8136 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
8137 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
8138 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8139 24991749 Iustin Pop
        size = disk_dict.get('size', None)
8140 24991749 Iustin Pop
        if size is None:
8141 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing",
8142 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8143 24991749 Iustin Pop
        try:
8144 24991749 Iustin Pop
          size = int(size)
8145 691744c4 Iustin Pop
        except (TypeError, ValueError), err:
8146 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
8147 5c983ee5 Iustin Pop
                                     str(err), errors.ECODE_INVAL)
8148 24991749 Iustin Pop
        disk_dict['size'] = size
8149 24991749 Iustin Pop
      else:
8150 24991749 Iustin Pop
        # modification of disk
8151 24991749 Iustin Pop
        if 'size' in disk_dict:
8152 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
8153 5c983ee5 Iustin Pop
                                     " grow-disk", errors.ECODE_INVAL)
8154 24991749 Iustin Pop
8155 24991749 Iustin Pop
    if disk_addremove > 1:
8156 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
8157 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
8158 24991749 Iustin Pop
8159 e29e9550 Iustin Pop
    if self.op.disks and self.op.disk_template is not None:
8160 e29e9550 Iustin Pop
      raise errors.OpPrereqError("Disk template conversion and other disk"
8161 e29e9550 Iustin Pop
                                 " changes not supported at the same time",
8162 e29e9550 Iustin Pop
                                 errors.ECODE_INVAL)
8163 e29e9550 Iustin Pop
8164 e29e9550 Iustin Pop
    if self.op.disk_template:
8165 e29e9550 Iustin Pop
      _CheckDiskTemplate(self.op.disk_template)
8166 e29e9550 Iustin Pop
      if (self.op.disk_template in constants.DTS_NET_MIRROR and
8167 e29e9550 Iustin Pop
          self.op.remote_node is None):
8168 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Changing the disk template to a mirrored"
8169 e29e9550 Iustin Pop
                                   " one requires specifying a secondary node",
8170 e29e9550 Iustin Pop
                                   errors.ECODE_INVAL)
8171 e29e9550 Iustin Pop
8172 24991749 Iustin Pop
    # NIC validation
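    # self.op.nics uses the same (op, params) structure; the params dict may
    # carry 'ip', 'bridge', 'link' and 'mac' keys, with constants.VALUE_NONE
    # clearing a value and constants.VALUE_AUTO/VALUE_GENERATE accepted for
    # 'mac'.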
8173 24991749 Iustin Pop
    nic_addremove = 0
8174 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8175 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8176 24991749 Iustin Pop
        nic_addremove += 1
8177 24991749 Iustin Pop
        continue
8178 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
8179 24991749 Iustin Pop
        nic_addremove += 1
8180 24991749 Iustin Pop
      else:
8181 24991749 Iustin Pop
        if not isinstance(nic_op, int):
8182 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index", errors.ECODE_INVAL)
8183 8b46606c Guido Trotter
        if not isinstance(nic_dict, dict):
8184 8b46606c Guido Trotter
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
8185 5c983ee5 Iustin Pop
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
8186 24991749 Iustin Pop
8187 24991749 Iustin Pop
      # nic_dict should be a dict
8188 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
8189 24991749 Iustin Pop
      if nic_ip is not None:
8190 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
8191 24991749 Iustin Pop
          nic_dict['ip'] = None
8192 24991749 Iustin Pop
        else:
8193 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
8194 5c983ee5 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip,
8195 5c983ee5 Iustin Pop
                                       errors.ECODE_INVAL)
8196 5c44da6a Guido Trotter
8197 cd098c41 Guido Trotter
      nic_bridge = nic_dict.get('bridge', None)
8198 cd098c41 Guido Trotter
      nic_link = nic_dict.get('link', None)
8199 cd098c41 Guido Trotter
      if nic_bridge and nic_link:
8200 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
8201 5c983ee5 Iustin Pop
                                   " at the same time", errors.ECODE_INVAL)
8202 cd098c41 Guido Trotter
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
8203 cd098c41 Guido Trotter
        nic_dict['bridge'] = None
8204 cd098c41 Guido Trotter
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
8205 cd098c41 Guido Trotter
        nic_dict['link'] = None
8206 cd098c41 Guido Trotter
8207 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
8208 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
8209 5c44da6a Guido Trotter
        if nic_mac is None:
8210 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
8211 5c44da6a Guido Trotter
8212 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
8213 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
8214 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8215 82187135 Renรฉ Nussbaumer
          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
8216 82187135 Renรฉ Nussbaumer
8217 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
8218 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
8219 5c983ee5 Iustin Pop
                                     " modifying an existing nic",
8220 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8221 5c44da6a Guido Trotter
8222 24991749 Iustin Pop
    if nic_addremove > 1:
8223 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
8224 5c983ee5 Iustin Pop
                                 " supported at a time", errors.ECODE_INVAL)
8225 24991749 Iustin Pop
8226 1a5c7281 Guido Trotter
  def ExpandNames(self):
8227 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
8228 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
8229 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
8230 74409b12 Iustin Pop
8231 74409b12 Iustin Pop
  def DeclareLocks(self, level):
8232 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
8233 74409b12 Iustin Pop
      self._LockInstancesNodes()
8234 e29e9550 Iustin Pop
      if self.op.disk_template and self.op.remote_node:
8235 e29e9550 Iustin Pop
        self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
8236 e29e9550 Iustin Pop
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
8237 a8083063 Iustin Pop
8238 a8083063 Iustin Pop
  def BuildHooksEnv(self):
8239 a8083063 Iustin Pop
    """Build hooks env.
8240 a8083063 Iustin Pop

8241 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
8242 a8083063 Iustin Pop

8243 a8083063 Iustin Pop
    """
8244 396e1b78 Michael Hanselmann
    args = dict()
8245 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
8246 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
8247 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
8248 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
8249 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
8250 d8dcf3c9 Guido Trotter
    # information at all.
8251 d8dcf3c9 Guido Trotter
    if self.op.nics:
8252 d8dcf3c9 Guido Trotter
      args['nics'] = []
8253 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
8254 62f0dd02 Guido Trotter
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
8255 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
8256 d8dcf3c9 Guido Trotter
        if idx in nic_override:
8257 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
8258 d8dcf3c9 Guido Trotter
        else:
8259 d8dcf3c9 Guido Trotter
          this_nic_override = {}
8260 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
8261 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
8262 d8dcf3c9 Guido Trotter
        else:
8263 d8dcf3c9 Guido Trotter
          ip = nic.ip
8264 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
8265 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
8266 d8dcf3c9 Guido Trotter
        else:
8267 d8dcf3c9 Guido Trotter
          mac = nic.mac
8268 62f0dd02 Guido Trotter
        if idx in self.nic_pnew:
8269 62f0dd02 Guido Trotter
          nicparams = self.nic_pnew[idx]
8270 62f0dd02 Guido Trotter
        else:
8271 62f0dd02 Guido Trotter
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
8272 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
8273 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
8274 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
8275 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
8276 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
8277 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
8278 62f0dd02 Guido Trotter
        nicparams = self.nic_pnew[constants.DDM_ADD]
8279 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
8280 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
8281 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
8282 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
8283 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
8284 d8dcf3c9 Guido Trotter
8285 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
8286 e29e9550 Iustin Pop
    if self.op.disk_template:
8287 e29e9550 Iustin Pop
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
8288 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
8289 a8083063 Iustin Pop
    return env, nl, nl
8290 a8083063 Iustin Pop
8291 7e950d31 Iustin Pop
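  # Illustrative note (not part of the original code): the "args" override
  # built in BuildHooksEnv above might look like (names, addresses and
  # values are made up):
  #   {'memory': 512, 'vcpus': 2,
  #    'nics': [('192.0.2.10', 'aa:00:00:11:22:33', 'bridged', 'xen-br0')]}
  # i.e. one (ip, mac, mode, link) tuple per NIC, with self.op.nics applied.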
  @staticmethod
8292 7e950d31 Iustin Pop
  def _GetUpdatedParams(old_params, update_dict,
8293 0329617a Guido Trotter
                        default_values, parameter_types):
8294 0329617a Guido Trotter
    """Return the new params dict for the given params.
8295 0329617a Guido Trotter

8296 0329617a Guido Trotter
    @type old_params: dict
8297 f2fd87d7 Iustin Pop
    @param old_params: old parameters
8298 0329617a Guido Trotter
    @type update_dict: dict
8299 f2fd87d7 Iustin Pop
    @param update_dict: dict containing new parameter values,
8300 f2fd87d7 Iustin Pop
                        or constants.VALUE_DEFAULT to reset the
8301 f2fd87d7 Iustin Pop
                        parameter to its default value
8302 0329617a Guido Trotter
    @type default_values: dict
8303 0329617a Guido Trotter
    @param default_values: default values for the filled parameters
8304 0329617a Guido Trotter
    @type parameter_types: dict
8305 0329617a Guido Trotter
    @param parameter_types: dict mapping target dict keys to types
8306 0329617a Guido Trotter
                            in constants.ENFORCEABLE_TYPES
8307 0329617a Guido Trotter
    @rtype: (dict, dict)
8308 0329617a Guido Trotter
    @return: (new_parameters, filled_parameters)
8309 0329617a Guido Trotter

8310 0329617a Guido Trotter
    """
8311 0329617a Guido Trotter
    params_copy = copy.deepcopy(old_params)
8312 0329617a Guido Trotter
    for key, val in update_dict.iteritems():
8313 0329617a Guido Trotter
      if val == constants.VALUE_DEFAULT:
8314 0329617a Guido Trotter
        try:
8315 0329617a Guido Trotter
          del params_copy[key]
8316 0329617a Guido Trotter
        except KeyError:
8317 0329617a Guido Trotter
          pass
8318 0329617a Guido Trotter
      else:
8319 0329617a Guido Trotter
        params_copy[key] = val
8320 0329617a Guido Trotter
    utils.ForceDictType(params_copy, parameter_types)
8321 0329617a Guido Trotter
    params_filled = objects.FillDict(default_values, params_copy)
8322 0329617a Guido Trotter
    return (params_copy, params_filled)
8323 0329617a Guido Trotter
8324 a8083063 Iustin Pop
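  # Illustrative sketch (not part of the original LU): the merge semantics of
  # _GetUpdatedParams on plain dicts, with the type enforcement step left
  # out.  The parameter names and values below are made up for the example.
  @staticmethod
  def _ExampleUpdatedParams():
    """Hypothetical demo of the update/fill behaviour documented above."""
    old_params = {"memory": 512, "vcpus": 2}
    update_dict = {"memory": constants.VALUE_DEFAULT, "vcpus": 4}
    default_values = {"memory": 128, "vcpus": 1}
    new_params = copy.deepcopy(old_params)
    for key, val in update_dict.iteritems():
      if val == constants.VALUE_DEFAULT:
        # resetting a parameter means removing it from the instance dict
        new_params.pop(key, None)
      else:
        new_params[key] = val
    filled = objects.FillDict(default_values, new_params)
    # new_params == {"vcpus": 4}; filled == {"memory": 128, "vcpus": 4}
    return (new_params, filled)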
  def CheckPrereq(self):
8325 a8083063 Iustin Pop
    """Check prerequisites.
8326 a8083063 Iustin Pop

8327 a8083063 Iustin Pop
    This checks the new parameters against the current state of the instance
    and its nodes.
8328 a8083063 Iustin Pop

8329 a8083063 Iustin Pop
    """
8330 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
8331 a8083063 Iustin Pop
8332 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
8333 31a853d2 Iustin Pop
8334 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
8335 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
8336 1a5c7281 Guido Trotter
    assert self.instance is not None, \
8337 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
8338 6b12959c Iustin Pop
    pnode = instance.primary_node
8339 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
8340 74409b12 Iustin Pop
8341 e29e9550 Iustin Pop
    if self.op.disk_template:
8342 e29e9550 Iustin Pop
      if instance.disk_template == self.op.disk_template:
8343 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Instance already has disk template %s" %
8344 e29e9550 Iustin Pop
                                   instance.disk_template, errors.ECODE_INVAL)
8345 e29e9550 Iustin Pop
8346 e29e9550 Iustin Pop
      if (instance.disk_template,
8347 e29e9550 Iustin Pop
          self.op.disk_template) not in self._DISK_CONVERSIONS:
8348 e29e9550 Iustin Pop
        raise errors.OpPrereqError("Unsupported disk template conversion from"
8349 e29e9550 Iustin Pop
                                   " %s to %s" % (instance.disk_template,
8350 e29e9550 Iustin Pop
                                                  self.op.disk_template),
8351 e29e9550 Iustin Pop
                                   errors.ECODE_INVAL)
8352 e29e9550 Iustin Pop
      if self.op.disk_template in constants.DTS_NET_MIRROR:
8353 e29e9550 Iustin Pop
        _CheckNodeOnline(self, self.op.remote_node)
8354 e29e9550 Iustin Pop
        _CheckNodeNotDrained(self, self.op.remote_node)
8355 e29e9550 Iustin Pop
        disks = [{"size": d.size} for d in instance.disks]
8356 e29e9550 Iustin Pop
        required = _ComputeDiskSize(self.op.disk_template, disks)
8357 e29e9550 Iustin Pop
        _CheckNodesFreeDisk(self, [self.op.remote_node], required)
8358 e29e9550 Iustin Pop
        _CheckInstanceDown(self, instance, "cannot change disk template")
8359 e29e9550 Iustin Pop
8360 338e51e8 Iustin Pop
    # hvparams processing
8361 74409b12 Iustin Pop
    if self.op.hvparams:
8362 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
8363 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
8364 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
8365 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
8366 74409b12 Iustin Pop
      # local check
8367 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
8368 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
8369 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
8370 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
8371 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
8372 338e51e8 Iustin Pop
    else:
8373 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
8374 338e51e8 Iustin Pop
8375 338e51e8 Iustin Pop
    # beparams processing
8376 338e51e8 Iustin Pop
    if self.op.beparams:
8377 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
8378 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
8379 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
8380 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
8381 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
8382 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
8383 338e51e8 Iustin Pop
    else:
8384 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
8385 74409b12 Iustin Pop
8386 cfefe007 Guido Trotter
    self.warn = []
8387 647a5d80 Iustin Pop
8388 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
8389 647a5d80 Iustin Pop
      mem_check_list = [pnode]
8390 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
8391 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
8392 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
8393 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
8394 72737a7f Iustin Pop
                                                  instance.hypervisor)
8395 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
8396 72737a7f Iustin Pop
                                         instance.hypervisor)
8397 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
8398 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
8399 070e998b Iustin Pop
      if msg:
8400 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
8401 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
8402 070e998b Iustin Pop
                         (pnode,  msg))
8403 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
8404 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
8405 070e998b Iustin Pop
                         " free memory information" % pnode)
8406 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
8407 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
8408 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
8409 cfefe007 Guido Trotter
      else:
8410 7ad1af4a Iustin Pop
        if instance_info.payload:
8411 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
8412 cfefe007 Guido Trotter
        else:
8413 cfefe007 Guido Trotter
          # Assume instance not running
8414 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
8415 cfefe007 Guido Trotter
          # and we have no other way to check)
8416 cfefe007 Guido Trotter
          current_mem = 0
8417 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
8418 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
8419 cfefe007 Guido Trotter
        if miss_mem > 0:
8420 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
8421 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
8422 5c983ee5 Iustin Pop
                                     " missing on its primary node" % miss_mem,
8423 5c983ee5 Iustin Pop
                                     errors.ECODE_NORES)
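        # Worked example (illustrative, numbers made up): requesting
        # BE_MEMORY = 2048 MB while the instance currently uses 512 MB and
        # the primary node reports 1024 MB free gives
        # miss_mem = 2048 - 512 - 1024 = 512 > 0, so the change is refused.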
8424 cfefe007 Guido Trotter
8425 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
8426 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
8427 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
8428 ea33068f Iustin Pop
            continue
8429 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
8430 070e998b Iustin Pop
          if msg:
8431 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
8432 070e998b Iustin Pop
                             (node, msg))
8433 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
8434 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
8435 070e998b Iustin Pop
                             " memory information" % node)
8436 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
8437 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
8438 647a5d80 Iustin Pop
                             " secondary node %s" % node)
8439 5bc84f33 Alexander Schreiber
8440 24991749 Iustin Pop
    # NIC processing
8441 cd098c41 Guido Trotter
    self.nic_pnew = {}
8442 cd098c41 Guido Trotter
    self.nic_pinst = {}
8443 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8444 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8445 24991749 Iustin Pop
        if not instance.nics:
8446 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove",
8447 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8448 24991749 Iustin Pop
        continue
8449 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
8450 24991749 Iustin Pop
        # an existing nic
8451 21bcb9aa Michael Hanselmann
        if not instance.nics:
8452 21bcb9aa Michael Hanselmann
          raise errors.OpPrereqError("Invalid NIC index %s, instance has"
8453 21bcb9aa Michael Hanselmann
                                     " no NICs" % nic_op,
8454 21bcb9aa Michael Hanselmann
                                     errors.ECODE_INVAL)
8455 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
8456 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
8457 24991749 Iustin Pop
                                     " are 0 to %d" %
8458 21bcb9aa Michael Hanselmann
                                     (nic_op, len(instance.nics) - 1),
8459 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8460 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
8461 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
8462 cd098c41 Guido Trotter
      else:
8463 cd098c41 Guido Trotter
        old_nic_params = {}
8464 cd098c41 Guido Trotter
        old_nic_ip = None
8465 cd098c41 Guido Trotter
8466 cd098c41 Guido Trotter
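      # Merge only the real NIC parameters (the keys in
      # constants.NICS_PARAMETERS, e.g. mode/link); ip and mac are handled
      # separately below.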
      update_params_dict = dict([(key, nic_dict[key])
8467 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
8468 cd098c41 Guido Trotter
                                 if key in nic_dict])
8469 cd098c41 Guido Trotter
8470 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
8471 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
8472 cd098c41 Guido Trotter
8473 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
8474 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
8475 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
8476 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
8477 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
8478 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
8479 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
8480 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
8481 cd098c41 Guido Trotter
8482 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
8483 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
8484 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
8485 35c0c8da Iustin Pop
        if msg:
8486 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
8487 24991749 Iustin Pop
          if self.force:
8488 24991749 Iustin Pop
            self.warn.append(msg)
8489 24991749 Iustin Pop
          else:
8490 5c983ee5 Iustin Pop
            raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
8491 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
8492 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
8493 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
8494 cd098c41 Guido Trotter
        else:
8495 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
8496 cd098c41 Guido Trotter
        if nic_ip is None:
8497 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
8498 5c983ee5 Iustin Pop
                                     ' on a routed nic', errors.ECODE_INVAL)
8499 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
8500 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
8501 5c44da6a Guido Trotter
        if nic_mac is None:
8502 5c983ee5 Iustin Pop
          raise errors.OpPrereqError('Cannot set the nic mac to None',
8503 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8504 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
8505 5c44da6a Guido Trotter
          # otherwise generate the mac
8506 36b66e6e Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC(self.proc.GetECId())
8507 5c44da6a Guido Trotter
        else:
8508 5c44da6a Guido Trotter
          # or validate/reserve the current one
8509 36b66e6e Guido Trotter
          try:
8510 36b66e6e Guido Trotter
            self.cfg.ReserveMAC(nic_mac, self.proc.GetECId())
8511 36b66e6e Guido Trotter
          except errors.ReservationError:
8512 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
8513 5c983ee5 Iustin Pop
                                       " in cluster" % nic_mac,
8514 5c983ee5 Iustin Pop
                                       errors.ECODE_NOTUNIQUE)
8515 24991749 Iustin Pop
8516 24991749 Iustin Pop
    # DISK processing
8517 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
8518 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
8519 5c983ee5 Iustin Pop
                                 " diskless instances",
8520 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
8521 1122eb25 Iustin Pop
    for disk_op, _ in self.op.disks:
8522 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8523 24991749 Iustin Pop
        if len(instance.disks) == 1:
8524 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
8525 31624382 Iustin Pop
                                     " an instance", errors.ECODE_INVAL)
8526 31624382 Iustin Pop
        _CheckInstanceDown(self, instance, "cannot remove disks")
8527 24991749 Iustin Pop
8528 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
8529 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
8530 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
8531 5c983ee5 Iustin Pop
                                   " add more" % constants.MAX_DISKS,
8532 5c983ee5 Iustin Pop
                                   errors.ECODE_STATE)
8533 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
8534 24991749 Iustin Pop
        # an existing disk
8535 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
8536 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
8537 24991749 Iustin Pop
                                     " are 0 to %d" %
8538 5c983ee5 Iustin Pop
                                     (disk_op, len(instance.disks) - 1),
8539 5c983ee5 Iustin Pop
                                     errors.ECODE_INVAL)
8540 24991749 Iustin Pop
8541 96b39bcc Iustin Pop
    # OS change
8542 96b39bcc Iustin Pop
    if self.op.os_name and not self.op.force:
8543 96b39bcc Iustin Pop
      _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
8544 96b39bcc Iustin Pop
                      self.op.force_variant)
8545 96b39bcc Iustin Pop
8546 a8083063 Iustin Pop
    return
8547 a8083063 Iustin Pop
8548 e29e9550 Iustin Pop
  def _ConvertPlainToDrbd(self, feedback_fn):
8549 e29e9550 Iustin Pop
    """Converts an instance from plain to drbd.
8550 e29e9550 Iustin Pop

8551 e29e9550 Iustin Pop
    """
8552 e29e9550 Iustin Pop
    feedback_fn("Converting template to drbd")
8553 e29e9550 Iustin Pop
    instance = self.instance
8554 e29e9550 Iustin Pop
    pnode = instance.primary_node
8555 e29e9550 Iustin Pop
    snode = self.op.remote_node
8556 e29e9550 Iustin Pop
8557 e29e9550 Iustin Pop
    # create a fake disk info for _GenerateDiskTemplate
8558 e29e9550 Iustin Pop
    disk_info = [{"size": d.size, "mode": d.mode} for d in instance.disks]
8559 e29e9550 Iustin Pop
    new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
8560 e29e9550 Iustin Pop
                                      instance.name, pnode, [snode],
8561 e29e9550 Iustin Pop
                                      disk_info, None, None, 0)
8562 e29e9550 Iustin Pop
    info = _GetInstanceInfoText(instance)
8563 e29e9550 Iustin Pop
    feedback_fn("Creating aditional volumes...")
8564 e29e9550 Iustin Pop
    # first, create the missing data and meta devices
8565 e29e9550 Iustin Pop
    for disk in new_disks:
8566 e29e9550 Iustin Pop
      # unfortunately this is... not too nice
8567 e29e9550 Iustin Pop
      _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
8568 e29e9550 Iustin Pop
                            info, True)
8569 e29e9550 Iustin Pop
      for child in disk.children:
8570 e29e9550 Iustin Pop
        _CreateSingleBlockDev(self, snode, instance, child, info, True)
8571 e29e9550 Iustin Pop
    # at this stage, all new LVs have been created, we can rename the
8572 e29e9550 Iustin Pop
    # old ones
8573 e29e9550 Iustin Pop
    feedback_fn("Renaming original volumes...")
8574 e29e9550 Iustin Pop
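    # Rename each original plain LV to the logical id of the data child
    # (children[0]) of its new DRBD disk, so the existing data ends up
    # backing the new DRBD device without being copied.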
    rename_list = [(o, n.children[0].logical_id)
8575 e29e9550 Iustin Pop
                   for (o, n) in zip(instance.disks, new_disks)]
8576 e29e9550 Iustin Pop
    result = self.rpc.call_blockdev_rename(pnode, rename_list)
8577 e29e9550 Iustin Pop
    result.Raise("Failed to rename original LVs")
8578 e29e9550 Iustin Pop
8579 e29e9550 Iustin Pop
    feedback_fn("Initializing DRBD devices...")
8580 e29e9550 Iustin Pop
    # all child devices are in place, we can now create the DRBD devices
8581 e29e9550 Iustin Pop
    for disk in new_disks:
8582 e29e9550 Iustin Pop
      for node in [pnode, snode]:
8583 e29e9550 Iustin Pop
        f_create = node == pnode
8584 e29e9550 Iustin Pop
        _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
8585 e29e9550 Iustin Pop
8586 e29e9550 Iustin Pop
    # at this point, the instance has been modified
8587 e29e9550 Iustin Pop
    instance.disk_template = constants.DT_DRBD8
8588 e29e9550 Iustin Pop
    instance.disks = new_disks
8589 e29e9550 Iustin Pop
    self.cfg.Update(instance, feedback_fn)
8590 e29e9550 Iustin Pop
8591 e29e9550 Iustin Pop
    # disks are created, waiting for sync
8592 e29e9550 Iustin Pop
    disk_abort = not _WaitForSync(self, instance)
8593 e29e9550 Iustin Pop
    if disk_abort:
8594 e29e9550 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
8595 e29e9550 Iustin Pop
                               " this instance, please cleanup manually")
8596 e29e9550 Iustin Pop
8597 2f414c48 Iustin Pop
  def _ConvertDrbdToPlain(self, feedback_fn):
8598 2f414c48 Iustin Pop
    """Converts an instance from drbd to plain.
8599 2f414c48 Iustin Pop

8600 2f414c48 Iustin Pop
    """
8601 2f414c48 Iustin Pop
    instance = self.instance
8602 2f414c48 Iustin Pop
    assert len(instance.secondary_nodes) == 1
8603 2f414c48 Iustin Pop
    pnode = instance.primary_node
8604 2f414c48 Iustin Pop
    snode = instance.secondary_nodes[0]
8605 2f414c48 Iustin Pop
    feedback_fn("Converting template to plain")
8606 2f414c48 Iustin Pop
8607 2f414c48 Iustin Pop
    old_disks = instance.disks
8608 2f414c48 Iustin Pop
    new_disks = [d.children[0] for d in old_disks]
8609 2f414c48 Iustin Pop
8610 2f414c48 Iustin Pop
    # copy over size and mode
8611 2f414c48 Iustin Pop
    for parent, child in zip(old_disks, new_disks):
8612 2f414c48 Iustin Pop
      child.size = parent.size
8613 2f414c48 Iustin Pop
      child.mode = parent.mode
8614 2f414c48 Iustin Pop
8615 2f414c48 Iustin Pop
    # update instance structure
8616 2f414c48 Iustin Pop
    instance.disks = new_disks
8617 2f414c48 Iustin Pop
    instance.disk_template = constants.DT_PLAIN
8618 2f414c48 Iustin Pop
    self.cfg.Update(instance, feedback_fn)
8619 2f414c48 Iustin Pop
8620 2f414c48 Iustin Pop
    feedback_fn("Removing volumes on the secondary node...")
8621 2f414c48 Iustin Pop
    for disk in old_disks:
8622 2f414c48 Iustin Pop
      self.cfg.SetDiskID(disk, snode)
8623 2f414c48 Iustin Pop
      msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
8624 2f414c48 Iustin Pop
      if msg:
8625 2f414c48 Iustin Pop
        self.LogWarning("Could not remove block device %s on node %s,"
8626 2f414c48 Iustin Pop
                        " continuing anyway: %s", disk.iv_name, snode, msg)
8627 2f414c48 Iustin Pop
8628 2f414c48 Iustin Pop
    feedback_fn("Removing unneeded volumes on the primary node...")
8629 2f414c48 Iustin Pop
    for idx, disk in enumerate(old_disks):
8630 2f414c48 Iustin Pop
      meta = disk.children[1]
8631 2f414c48 Iustin Pop
      self.cfg.SetDiskID(meta, pnode)
8632 2f414c48 Iustin Pop
      msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
8633 2f414c48 Iustin Pop
      if msg:
8634 2f414c48 Iustin Pop
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
8635 2f414c48 Iustin Pop
                        " continuing anyway: %s", idx, pnode, msg)
8636 2f414c48 Iustin Pop
8637 2f414c48 Iustin Pop
8638 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8639 a8083063 Iustin Pop
    """Modifies an instance.
8640 a8083063 Iustin Pop

8641 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
8642 24991749 Iustin Pop

8643 a8083063 Iustin Pop
    """
8644 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
8645 cfefe007 Guido Trotter
    # feedback_fn there.
8646 cfefe007 Guido Trotter
    for warn in self.warn:
8647 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
8648 cfefe007 Guido Trotter
8649 a8083063 Iustin Pop
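    # "result" collects (changed item, new value) pairs, e.g.
    # ("disk/1", "remove") or ("be/memory", 512), and is returned to the
    # caller once all modifications have been applied.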
    result = []
8650 a8083063 Iustin Pop
    instance = self.instance
8651 24991749 Iustin Pop
    # disk changes
8652 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
8653 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
8654 24991749 Iustin Pop
        # remove the last disk
8655 24991749 Iustin Pop
        device = instance.disks.pop()
8656 24991749 Iustin Pop
        device_idx = len(instance.disks)
8657 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
8658 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
8659 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
8660 e1bc0878 Iustin Pop
          if msg:
8661 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
8662 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
8663 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
8664 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
8665 24991749 Iustin Pop
        # add a new disk
8666 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
8667 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
8668 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
8669 24991749 Iustin Pop
        else:
8670 24991749 Iustin Pop
          file_driver = file_path = None
8671 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
8672 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
8673 24991749 Iustin Pop
                                         instance.disk_template,
8674 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
8675 24991749 Iustin Pop
                                         instance.secondary_nodes,
8676 24991749 Iustin Pop
                                         [disk_dict],
8677 24991749 Iustin Pop
                                         file_path,
8678 24991749 Iustin Pop
                                         file_driver,
8679 24991749 Iustin Pop
                                         disk_idx_base)[0]
8680 24991749 Iustin Pop
        instance.disks.append(new_disk)
8681 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
8682 24991749 Iustin Pop
8683 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
8684 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
8685 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
8686 24991749 Iustin Pop
        #HARDCODE
8687 428958aa Iustin Pop
        for node in instance.all_nodes:
8688 428958aa Iustin Pop
          f_create = node == instance.primary_node
8689 796cab27 Iustin Pop
          try:
8690 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
8691 428958aa Iustin Pop
                            f_create, info, f_create)
8692 1492cca7 Iustin Pop
          except errors.OpExecError, err:
8693 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
8694 428958aa Iustin Pop
                            " node %s: %s",
8695 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
8696 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
8697 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
8698 24991749 Iustin Pop
      else:
8699 24991749 Iustin Pop
        # change a given disk
8700 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
8701 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
8702 e29e9550 Iustin Pop
8703 e29e9550 Iustin Pop
    if self.op.disk_template:
8704 e29e9550 Iustin Pop
      r_shut = _ShutdownInstanceDisks(self, instance)
8705 e29e9550 Iustin Pop
      if not r_shut:
8706 e29e9550 Iustin Pop
        raise errors.OpExecError("Cannot shutdow instance disks, unable to"
8707 e29e9550 Iustin Pop
                                 " proceed with disk template conversion")
8708 e29e9550 Iustin Pop
      mode = (instance.disk_template, self.op.disk_template)
8709 e29e9550 Iustin Pop
      try:
8710 e29e9550 Iustin Pop
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
8711 e29e9550 Iustin Pop
      except:
8712 e29e9550 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
8713 e29e9550 Iustin Pop
        raise
8714 e29e9550 Iustin Pop
      result.append(("disk_template", self.op.disk_template))
8715 e29e9550 Iustin Pop
8716 24991749 Iustin Pop
    # NIC changes
8717 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
8718 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
8719 24991749 Iustin Pop
        # remove the last nic
8720 24991749 Iustin Pop
        del instance.nics[-1]
8721 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
8722 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
8723 5c44da6a Guido Trotter
        # mac and bridge should be set by now
8724 5c44da6a Guido Trotter
        mac = nic_dict['mac']
8725 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
8726 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
8727 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
8728 24991749 Iustin Pop
        instance.nics.append(new_nic)
8729 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
8730 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
8731 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
8732 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
8733 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
8734 cd098c41 Guido Trotter
                       )))
8735 24991749 Iustin Pop
      else:
8736 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
8737 24991749 Iustin Pop
          if key in nic_dict:
8738 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
8739 beabf067 Guido Trotter
        if nic_op in self.nic_pinst:
8740 beabf067 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pinst[nic_op]
8741 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
8742 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
8743 24991749 Iustin Pop
8744 24991749 Iustin Pop
    # hvparams changes
8745 74409b12 Iustin Pop
    if self.op.hvparams:
8746 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
8747 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
8748 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
8749 24991749 Iustin Pop
8750 24991749 Iustin Pop
    # beparams changes
8751 338e51e8 Iustin Pop
    if self.op.beparams:
8752 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
8753 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
8754 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
8755 a8083063 Iustin Pop
8756 96b39bcc Iustin Pop
    # OS change
8757 96b39bcc Iustin Pop
    if self.op.os_name:
8758 96b39bcc Iustin Pop
      instance.os = self.op.os_name
8759 96b39bcc Iustin Pop
8760 a4eae71f Michael Hanselmann
    self.cfg.Update(instance, feedback_fn)
8761 a8083063 Iustin Pop
8762 a8083063 Iustin Pop
    return result
8763 a8083063 Iustin Pop
8764 e29e9550 Iustin Pop
  _DISK_CONVERSIONS = {
8765 e29e9550 Iustin Pop
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
8766 2f414c48 Iustin Pop
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
8767 e29e9550 Iustin Pop
    }
8768 a8083063 Iustin Pop
8769 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
8770 a8083063 Iustin Pop
  """Query the exports list
8771 a8083063 Iustin Pop

8772 a8083063 Iustin Pop
  """
8773 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
8774 21a15682 Guido Trotter
  REQ_BGL = False
8775 21a15682 Guido Trotter
8776 21a15682 Guido Trotter
  def ExpandNames(self):
8777 21a15682 Guido Trotter
    self.needed_locks = {}
8778 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
8779 21a15682 Guido Trotter
    if not self.op.nodes:
8780 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8781 21a15682 Guido Trotter
    else:
8782 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
8783 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
8784 a8083063 Iustin Pop
8785 a8083063 Iustin Pop
  def CheckPrereq(self):
8786 21a15682 Guido Trotter
    """Check prerequisites.
8787 a8083063 Iustin Pop

8788 a8083063 Iustin Pop
    """
8789 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
8790 a8083063 Iustin Pop
8791 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8792 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
8793 a8083063 Iustin Pop

8794 e4376078 Iustin Pop
    @rtype: dict
8795 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
8796 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
8797 e4376078 Iustin Pop
        that node.
8798 a8083063 Iustin Pop

8799 a8083063 Iustin Pop
    """
8800 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
8801 b04285f2 Guido Trotter
    result = {}
8802 b04285f2 Guido Trotter
    for node in rpcresult:
8803 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
8804 b04285f2 Guido Trotter
        result[node] = False
8805 b04285f2 Guido Trotter
      else:
8806 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
8807 b04285f2 Guido Trotter
8808 b04285f2 Guido Trotter
    return result
8809 a8083063 Iustin Pop
8810 a8083063 Iustin Pop
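# Illustrative sketch (not part of the original code): the kind of value
# LUQueryExports.Exec returns.  Node and instance names are made up; a value
# of False means that querying the exports on that node failed.
def _ExampleExportListResult():
  """Hypothetical sample of the node -> export-list mapping."""
  return {
    "node1.example.com": ["instance1.example.com"],
    "node2.example.com": [],
    "node3.example.com": False,
    }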
8811 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
8812 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
8813 a8083063 Iustin Pop

8814 a8083063 Iustin Pop
  """
8815 a8083063 Iustin Pop
  HPATH = "instance-export"
8816 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
8817 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
8818 6657590e Guido Trotter
  REQ_BGL = False
8819 6657590e Guido Trotter
8820 17c3f802 Guido Trotter
  def CheckArguments(self):
8821 17c3f802 Guido Trotter
    """Check the arguments.
8822 17c3f802 Guido Trotter

8823 17c3f802 Guido Trotter
    """
8824 17c3f802 Guido Trotter
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
8825 17c3f802 Guido Trotter
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
8826 17c3f802 Guido Trotter
8827 6657590e Guido Trotter
  def ExpandNames(self):
8828 6657590e Guido Trotter
    self._ExpandAndLockInstance()
8829 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
8830 6657590e Guido Trotter
    #
8831 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
8832 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
8833 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
8834 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
8835 6657590e Guido Trotter
    #    then one to remove, after
8836 5bbd3f7f Michael Hanselmann
    #  - removing the removal operation altogether
8837 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
8838 6657590e Guido Trotter
8839 6657590e Guido Trotter
  def DeclareLocks(self, level):
8840 6657590e Guido Trotter
    """Last minute lock declaration."""
8841 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
8842 a8083063 Iustin Pop
8843 a8083063 Iustin Pop
  def BuildHooksEnv(self):
8844 a8083063 Iustin Pop
    """Build hooks env.
8845 a8083063 Iustin Pop

8846 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
8847 a8083063 Iustin Pop

8848 a8083063 Iustin Pop
    """
8849 a8083063 Iustin Pop
    env = {
8850 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
8851 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
8852 17c3f802 Guido Trotter
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
8853 a8083063 Iustin Pop
      }
8854 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
8855 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
8856 a8083063 Iustin Pop
          self.op.target_node]
8857 a8083063 Iustin Pop
    return env, nl, nl
8858 a8083063 Iustin Pop
8859 a8083063 Iustin Pop
  def CheckPrereq(self):
8860 a8083063 Iustin Pop
    """Check prerequisites.
8861 a8083063 Iustin Pop

8862 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
8863 a8083063 Iustin Pop

8864 a8083063 Iustin Pop
    """
8865 6657590e Guido Trotter
    instance_name = self.op.instance_name
8866 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
8867 6657590e Guido Trotter
    assert self.instance is not None, \
8868 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
8869 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
8870 a8083063 Iustin Pop
8871 cf26a87a Iustin Pop
    self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
8872 cf26a87a Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
8873 cf26a87a Iustin Pop
    assert self.dst_node is not None
8874 a8083063 Iustin Pop
8875 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
8876 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
8877 a8083063 Iustin Pop
8878 b6023d6c Manuel Franceschini
    # instance disk type verification
8879 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
8880 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
8881 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
8882 5c983ee5 Iustin Pop
                                   " file-based disks", errors.ECODE_INVAL)
8883 b6023d6c Manuel Franceschini
8884 e311ed53 Michael Hanselmann
  def _CreateSnapshots(self, feedback_fn):
8885 e311ed53 Michael Hanselmann
    """Creates an LVM snapshot for every disk of the instance.
8886 e311ed53 Michael Hanselmann

8887 e311ed53 Michael Hanselmann
    @return: List of snapshots as L{objects.Disk} instances; entries for
             disks whose snapshot failed are C{False}
8888 e311ed53 Michael Hanselmann

8889 e311ed53 Michael Hanselmann
    """
8890 e311ed53 Michael Hanselmann
    instance = self.instance
8891 e311ed53 Michael Hanselmann
    src_node = instance.primary_node
8892 e311ed53 Michael Hanselmann
8893 e311ed53 Michael Hanselmann
    vgname = self.cfg.GetVGName()
8894 e311ed53 Michael Hanselmann
8895 e311ed53 Michael Hanselmann
    snap_disks = []
8896 e311ed53 Michael Hanselmann
8897 e311ed53 Michael Hanselmann
    for idx, disk in enumerate(instance.disks):
8898 e311ed53 Michael Hanselmann
      feedback_fn("Creating a snapshot of disk/%s on node %s" %
8899 e311ed53 Michael Hanselmann
                  (idx, src_node))
8900 e311ed53 Michael Hanselmann
8901 e311ed53 Michael Hanselmann
      # result.payload will be a snapshot of an lvm leaf of the one we
8902 e311ed53 Michael Hanselmann
      # passed
8903 e311ed53 Michael Hanselmann
      result = self.rpc.call_blockdev_snapshot(src_node, disk)
8904 e311ed53 Michael Hanselmann
      msg = result.fail_msg
8905 e311ed53 Michael Hanselmann
      if msg:
8906 e311ed53 Michael Hanselmann
        self.LogWarning("Could not snapshot disk/%s on node %s: %s",
8907 e311ed53 Michael Hanselmann
                        idx, src_node, msg)
8908 e311ed53 Michael Hanselmann
        snap_disks.append(False)
8909 e311ed53 Michael Hanselmann
      else:
8910 e311ed53 Michael Hanselmann
        disk_id = (vgname, result.payload)
8911 e311ed53 Michael Hanselmann
        new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
8912 e311ed53 Michael Hanselmann
                               logical_id=disk_id, physical_id=disk_id,
8913 e311ed53 Michael Hanselmann
                               iv_name=disk.iv_name)
8914 e311ed53 Michael Hanselmann
        snap_disks.append(new_dev)
8915 e311ed53 Michael Hanselmann
8916 e311ed53 Michael Hanselmann
    return snap_disks
8917 e311ed53 Michael Hanselmann
8918 e311ed53 Michael Hanselmann
  def _RemoveSnapshot(self, feedback_fn, snap_disks, disk_index):
8919 e311ed53 Michael Hanselmann
    """Removes an LVM snapshot.
8920 e311ed53 Michael Hanselmann

8921 e311ed53 Michael Hanselmann
    @type snap_disks: list
8922 e311ed53 Michael Hanselmann
    @param snap_disks: The list of all snapshots as returned by
8923 e311ed53 Michael Hanselmann
                       L{_CreateSnapshots}
8924 e311ed53 Michael Hanselmann
    @type disk_index: number
8925 e311ed53 Michael Hanselmann
    @param disk_index: Index of the snapshot to be removed
8926 e311ed53 Michael Hanselmann
    @rtype: bool
8927 e311ed53 Michael Hanselmann
    @return: Whether removal was successful or not
8928 e311ed53 Michael Hanselmann

8929 e311ed53 Michael Hanselmann
    """
8930 e311ed53 Michael Hanselmann
    disk = snap_disks[disk_index]
8931 e311ed53 Michael Hanselmann
    if disk:
8932 e311ed53 Michael Hanselmann
      src_node = self.instance.primary_node
8933 e311ed53 Michael Hanselmann
8934 e311ed53 Michael Hanselmann
      feedback_fn("Removing snapshot of disk/%s on node %s" %
8935 e311ed53 Michael Hanselmann
                  (disk_index, src_node))
8936 e311ed53 Michael Hanselmann
8937 e311ed53 Michael Hanselmann
      result = self.rpc.call_blockdev_remove(src_node, disk)
8938 e311ed53 Michael Hanselmann
      if not result.fail_msg:
8939 e311ed53 Michael Hanselmann
        return True
8940 e311ed53 Michael Hanselmann
8941 e311ed53 Michael Hanselmann
      self.LogWarning("Could not remove snapshot for disk/%d from node"
8942 e311ed53 Michael Hanselmann
                      " %s: %s", disk_index, src_node, result.fail_msg)
8943 e311ed53 Michael Hanselmann
8944 e311ed53 Michael Hanselmann
    return False
8945 e311ed53 Michael Hanselmann
8946 e311ed53 Michael Hanselmann
  def _CleanupExports(self, feedback_fn):
8947 e311ed53 Michael Hanselmann
    """Removes exports of current instance from all other nodes.
8948 e311ed53 Michael Hanselmann

8949 e311ed53 Michael Hanselmann
    If an instance in a cluster with nodes A..D was exported to node C, its
8950 e311ed53 Michael Hanselmann
    exports will be removed from the nodes A, B and D.
8951 e311ed53 Michael Hanselmann

8952 e311ed53 Michael Hanselmann
    """
8953 e311ed53 Michael Hanselmann
    nodelist = self.cfg.GetNodeList()
8954 e311ed53 Michael Hanselmann
    nodelist.remove(self.dst_node.name)
8955 e311ed53 Michael Hanselmann
8956 e311ed53 Michael Hanselmann
    # on one-node clusters nodelist will be empty after the removal
8957 e311ed53 Michael Hanselmann
    # if we proceed, the backup would be removed because OpQueryExports
8958 e311ed53 Michael Hanselmann
    # substitutes an empty list with the full cluster node list.
8959 e311ed53 Michael Hanselmann
    iname = self.instance.name
8960 e311ed53 Michael Hanselmann
    if nodelist:
8961 e311ed53 Michael Hanselmann
      feedback_fn("Removing old exports for instance %s" % iname)
8962 e311ed53 Michael Hanselmann
      exportlist = self.rpc.call_export_list(nodelist)
8963 e311ed53 Michael Hanselmann
      for node in exportlist:
8964 e311ed53 Michael Hanselmann
        if exportlist[node].fail_msg:
8965 e311ed53 Michael Hanselmann
          continue
8966 e311ed53 Michael Hanselmann
        if iname in exportlist[node].payload:
8967 e311ed53 Michael Hanselmann
          msg = self.rpc.call_export_remove(node, iname).fail_msg
8968 e311ed53 Michael Hanselmann
          if msg:
8969 e311ed53 Michael Hanselmann
            self.LogWarning("Could not remove older export for instance %s"
8970 e311ed53 Michael Hanselmann
                            " on node %s: %s", iname, node, msg)
8971 e311ed53 Michael Hanselmann
8972 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
8973 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
8974 a8083063 Iustin Pop

8975 a8083063 Iustin Pop
    """
8976 a8083063 Iustin Pop
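    # Overall flow: optionally shut the instance down, snapshot every disk
    # on the primary node, export each snapshot to the target node, remove
    # the snapshots again, finalize the export on the target node and
    # finally clean up any older exports of this instance on other nodes.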
    instance = self.instance
8977 a8083063 Iustin Pop
    dst_node = self.dst_node
8978 a8083063 Iustin Pop
    src_node = instance.primary_node
8979 37972df0 Michael Hanselmann
8980 a8083063 Iustin Pop
    if self.op.shutdown:
8981 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
8982 37972df0 Michael Hanselmann
      feedback_fn("Shutting down instance %s" % instance.name)
8983 17c3f802 Guido Trotter
      result = self.rpc.call_instance_shutdown(src_node, instance,
8984 17c3f802 Guido Trotter
                                               self.shutdown_timeout)
8985 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
8986 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
8987 a8083063 Iustin Pop
8988 998c712c Iustin Pop
    # set the disks ID correctly since call_instance_start needs the
8989 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
8990 998c712c Iustin Pop
    for disk in instance.disks:
8991 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
8992 998c712c Iustin Pop
8993 3e53a60b Michael Hanselmann
    activate_disks = (not instance.admin_up)
8994 3e53a60b Michael Hanselmann
8995 3e53a60b Michael Hanselmann
    if activate_disks:
8996 3e53a60b Michael Hanselmann
      # Activate the instance disks if we're exporting a stopped instance
8997 3e53a60b Michael Hanselmann
      feedback_fn("Activating disks for %s" % instance.name)
8998 3e53a60b Michael Hanselmann
      _StartInstanceDisks(self, instance, None)
8999 3e53a60b Michael Hanselmann
9000 a8083063 Iustin Pop
    try:
9001 3e53a60b Michael Hanselmann
      # per-disk results
9002 3e53a60b Michael Hanselmann
      dresults = []
9003 e311ed53 Michael Hanselmann
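      # removed_snaps[i] becomes True once the snapshot of disk i has been
      # removed, so the finally: block below only cleans up what is left.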
      removed_snaps = [False] * len(instance.disks)
9004 e311ed53 Michael Hanselmann
9005 e311ed53 Michael Hanselmann
      snap_disks = None
9006 3e53a60b Michael Hanselmann
      try:
9007 e311ed53 Michael Hanselmann
        try:
9008 e311ed53 Michael Hanselmann
          snap_disks = self._CreateSnapshots(feedback_fn)
9009 e311ed53 Michael Hanselmann
        finally:
9010 e311ed53 Michael Hanselmann
          if self.op.shutdown and instance.admin_up:
9011 e311ed53 Michael Hanselmann
            feedback_fn("Starting instance %s" % instance.name)
9012 e311ed53 Michael Hanselmann
            result = self.rpc.call_instance_start(src_node, instance,
9013 e311ed53 Michael Hanselmann
                                                  None, None)
9014 e311ed53 Michael Hanselmann
            msg = result.fail_msg
9015 e311ed53 Michael Hanselmann
            if msg:
9016 e311ed53 Michael Hanselmann
              _ShutdownInstanceDisks(self, instance)
9017 e311ed53 Michael Hanselmann
              raise errors.OpExecError("Could not start instance: %s" % msg)
9018 37972df0 Michael Hanselmann
9019 e311ed53 Michael Hanselmann
        assert len(snap_disks) == len(instance.disks)
9020 e311ed53 Michael Hanselmann
        assert len(removed_snaps) == len(instance.disks)
9021 e311ed53 Michael Hanselmann
9022 e311ed53 Michael Hanselmann
        # TODO: check for size
9023 e311ed53 Michael Hanselmann
9024 e311ed53 Michael Hanselmann
        cluster_name = self.cfg.GetClusterName()
9025 e311ed53 Michael Hanselmann
        for idx, dev in enumerate(snap_disks):
9026 e311ed53 Michael Hanselmann
          feedback_fn("Exporting snapshot %s from %s to %s" %
9027 e311ed53 Michael Hanselmann
                      (idx, src_node, dst_node.name))
9028 e311ed53 Michael Hanselmann
          if dev:
9029 e311ed53 Michael Hanselmann
            # FIXME: pass debug from opcode to backend
9030 e311ed53 Michael Hanselmann
            result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
9031 e311ed53 Michael Hanselmann
                                                   instance, cluster_name,
9032 e311ed53 Michael Hanselmann
                                                   idx, self.op.debug_level)
9033 e311ed53 Michael Hanselmann
            msg = result.fail_msg
9034 e311ed53 Michael Hanselmann
            if msg:
9035 e311ed53 Michael Hanselmann
              self.LogWarning("Could not export disk/%s from node %s to"
9036 e311ed53 Michael Hanselmann
                              " node %s: %s", idx, src_node, dst_node.name, msg)
9037 e311ed53 Michael Hanselmann
              dresults.append(False)
9038 e311ed53 Michael Hanselmann
            else:
9039 e311ed53 Michael Hanselmann
              dresults.append(True)
9040 e311ed53 Michael Hanselmann
9041 e311ed53 Michael Hanselmann
            # Remove snapshot
9042 e311ed53 Michael Hanselmann
            if self._RemoveSnapshot(feedback_fn, snap_disks, idx):
9043 e311ed53 Michael Hanselmann
              removed_snaps[idx] = True
9044 3e53a60b Michael Hanselmann
          else:
9045 e311ed53 Michael Hanselmann
            dresults.append(False)
9046 a8083063 Iustin Pop
9047 e311ed53 Michael Hanselmann
        assert len(dresults) == len(instance.disks)
9048 e311ed53 Michael Hanselmann
9049 e311ed53 Michael Hanselmann
        # Check for backwards compatibility
9050 e311ed53 Michael Hanselmann
        assert compat.all(isinstance(i, bool) for i in dresults), \
9051 e311ed53 Michael Hanselmann
               "Not all results are boolean: %r" % dresults
9052 e311ed53 Michael Hanselmann
9053 e311ed53 Michael Hanselmann
        feedback_fn("Finalizing export on %s" % dst_node.name)
9054 e311ed53 Michael Hanselmann
        result = self.rpc.call_finalize_export(dst_node.name, instance,
9055 e311ed53 Michael Hanselmann
                                               snap_disks)
9056 e311ed53 Michael Hanselmann
        msg = result.fail_msg
9057 e311ed53 Michael Hanselmann
        fin_resu = not msg
9058 e311ed53 Michael Hanselmann
        if msg:
9059 e311ed53 Michael Hanselmann
          self.LogWarning("Could not finalize export for instance %s"
9060 e311ed53 Michael Hanselmann
                          " on node %s: %s", instance.name, dst_node.name, msg)
9061 e311ed53 Michael Hanselmann
9062 e311ed53 Michael Hanselmann
      finally:
9063 e311ed53 Michael Hanselmann
        # Remove all snapshots
9064 e311ed53 Michael Hanselmann
        assert len(removed_snaps) == len(instance.disks)
9065 e311ed53 Michael Hanselmann
        for idx, removed in enumerate(removed_snaps):
9066 e311ed53 Michael Hanselmann
          if not removed:
9067 e311ed53 Michael Hanselmann
            self._RemoveSnapshot(feedback_fn, snap_disks, idx)
9068 3e53a60b Michael Hanselmann
9069 3e53a60b Michael Hanselmann
    finally:
9070 3e53a60b Michael Hanselmann
      if activate_disks:
9071 3e53a60b Michael Hanselmann
        feedback_fn("Deactivating disks for %s" % instance.name)
9072 3e53a60b Michael Hanselmann
        _ShutdownInstanceDisks(self, instance)
9073 a8083063 Iustin Pop
9074 e311ed53 Michael Hanselmann
    self._CleanupExports(feedback_fn)
9075 a8083063 Iustin Pop
9076 084f05a5 Iustin Pop
    return fin_resu, dresults
9077 5c947f38 Iustin Pop
9078 5c947f38 Iustin Pop
9079 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
9080 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
9081 9ac99fda Guido Trotter

9082 9ac99fda Guido Trotter
  """
9083 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
9084 3656b3af Guido Trotter
  REQ_BGL = False
9085 3656b3af Guido Trotter
9086 3656b3af Guido Trotter
  def ExpandNames(self):
9087 3656b3af Guido Trotter
    self.needed_locks = {}
9088 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
9089 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
9090 3656b3af Guido Trotter
    # exports can also be removed for an already-removed instance)
9091 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
9092 9ac99fda Guido Trotter
9093 9ac99fda Guido Trotter
  def CheckPrereq(self):
9094 9ac99fda Guido Trotter
    """Check prerequisites.
9095 9ac99fda Guido Trotter
    """
9096 9ac99fda Guido Trotter
    pass
9097 9ac99fda Guido Trotter
9098 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
9099 9ac99fda Guido Trotter
    """Remove any export.
9100 9ac99fda Guido Trotter

9101 9ac99fda Guido Trotter
    """
9102 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
9103 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
9104 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
9105 9ac99fda Guido Trotter
    fqdn_warn = False
9106 9ac99fda Guido Trotter
    if not instance_name:
9107 9ac99fda Guido Trotter
      fqdn_warn = True
9108 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
9109 9ac99fda Guido Trotter
9110 1b7bfbb7 Iustin Pop
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
9111 1b7bfbb7 Iustin Pop
    exportlist = self.rpc.call_export_list(locked_nodes)
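    # exportlist maps each locked node name to an RPC result; on success
    # its payload is the list of export names present on that node, which
    # is what the membership test below relies on.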
9112 9ac99fda Guido Trotter
    found = False
9113 9ac99fda Guido Trotter
    for node in exportlist:
9114 4c4e4e1e Iustin Pop
      msg = exportlist[node].fail_msg
9115 1b7bfbb7 Iustin Pop
      if msg:
9116 1b7bfbb7 Iustin Pop
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
9117 781de953 Iustin Pop
        continue
9118 1b7bfbb7 Iustin Pop
      if instance_name in exportlist[node].payload:
9119 9ac99fda Guido Trotter
        found = True
9120 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
9121 4c4e4e1e Iustin Pop
        msg = result.fail_msg
9122 35fbcd11 Iustin Pop
        if msg:
9123 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
9124 35fbcd11 Iustin Pop
                        " on node %s: %s", instance_name, node, msg)
9125 9ac99fda Guido Trotter
9126 9ac99fda Guido Trotter
    if fqdn_warn and not found:
9127 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
9128 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
9129 9ac99fda Guido Trotter
                  " Domain Name.")
9130 9ac99fda Guido Trotter
9131 9ac99fda Guido Trotter
9132 fe267188 Iustin Pop
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
9133 5c947f38 Iustin Pop
  """Generic tags LU.
9134 5c947f38 Iustin Pop

9135 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
9136 5c947f38 Iustin Pop

9137 5c947f38 Iustin Pop
  """
9138 5c947f38 Iustin Pop
9139 8646adce Guido Trotter
  def ExpandNames(self):
9140 8646adce Guido Trotter
    self.needed_locks = {}
9141 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
9142 cf26a87a Iustin Pop
      self.op.name = _ExpandNodeName(self.cfg, self.op.name)
9143 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.op.name
9144 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
9145 cf26a87a Iustin Pop
      self.op.name = _ExpandInstanceName(self.cfg, self.op.name)
9146 cf26a87a Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
9147 8646adce Guido Trotter
9148 8646adce Guido Trotter
  def CheckPrereq(self):
9149 8646adce Guido Trotter
    """Check prerequisites.
9150 8646adce Guido Trotter

9151 8646adce Guido Trotter
    """
9152 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
9153 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
9154 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
9155 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
9156 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
9157 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
9158 5c947f38 Iustin Pop
    else:
9159 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
9160 5c983ee5 Iustin Pop
                                 str(self.op.kind), errors.ECODE_INVAL)
9161 5c947f38 Iustin Pop
9162 5c947f38 Iustin Pop
9163 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
9164 5c947f38 Iustin Pop
  """Returns the tags of a given object.
9165 5c947f38 Iustin Pop

9166 5c947f38 Iustin Pop
  """
9167 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
9168 8646adce Guido Trotter
  REQ_BGL = False
9169 5c947f38 Iustin Pop
9170 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
9171 5c947f38 Iustin Pop
    """Returns the tag list.
9172 5c947f38 Iustin Pop

9173 5c947f38 Iustin Pop
    """
9174 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
9175 5c947f38 Iustin Pop
9176 5c947f38 Iustin Pop
9177 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
9178 73415719 Iustin Pop
  """Searches the tags for a given pattern.
9179 73415719 Iustin Pop

9180 73415719 Iustin Pop
  """
9181 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
9182 8646adce Guido Trotter
  REQ_BGL = False
9183 8646adce Guido Trotter
9184 8646adce Guido Trotter
  def ExpandNames(self):
9185 8646adce Guido Trotter
    self.needed_locks = {}
9186 73415719 Iustin Pop
9187 73415719 Iustin Pop
  def CheckPrereq(self):
9188 73415719 Iustin Pop
    """Check prerequisites.
9189 73415719 Iustin Pop

9190 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
9191 73415719 Iustin Pop

9192 73415719 Iustin Pop
    """
9193 73415719 Iustin Pop
    try:
9194 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
9195 73415719 Iustin Pop
    except re.error, err:
9196 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
9197 5c983ee5 Iustin Pop
                                 (self.op.pattern, err), errors.ECODE_INVAL)
9198 73415719 Iustin Pop
9199 73415719 Iustin Pop
  def Exec(self, feedback_fn):
9200 73415719 Iustin Pop
    """Returns the tag list.
9201 73415719 Iustin Pop

9202 73415719 Iustin Pop
    """
9203 73415719 Iustin Pop
    cfg = self.cfg
9204 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
9205 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
9206 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
9207 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
9208 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
9209 73415719 Iustin Pop
    results = []
9210 73415719 Iustin Pop
    for path, target in tgts:
9211 73415719 Iustin Pop
      for tag in target.GetTags():
9212 73415719 Iustin Pop
        if self.re.search(tag):
9213 73415719 Iustin Pop
          results.append((path, tag))
9214 73415719 Iustin Pop
    return results
9215 73415719 Iustin Pop
9216 73415719 Iustin Pop
9217 f27302fa Iustin Pop
class LUAddTags(TagsLU):
9218 5c947f38 Iustin Pop
  """Sets a tag on a given object.
9219 5c947f38 Iustin Pop

9220 5c947f38 Iustin Pop
  """
9221 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
9222 8646adce Guido Trotter
  REQ_BGL = False
9223 5c947f38 Iustin Pop
9224 5c947f38 Iustin Pop
  def CheckPrereq(self):
9225 5c947f38 Iustin Pop
    """Check prerequisites.
9226 5c947f38 Iustin Pop

9227 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
9228 5c947f38 Iustin Pop

9229 5c947f38 Iustin Pop
    """
9230 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
9231 f27302fa Iustin Pop
    for tag in self.op.tags:
9232 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
9233 5c947f38 Iustin Pop
9234 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
9235 5c947f38 Iustin Pop
    """Sets the tag.
9236 5c947f38 Iustin Pop

9237 5c947f38 Iustin Pop
    """
9238 5c947f38 Iustin Pop
    try:
9239 f27302fa Iustin Pop
      for tag in self.op.tags:
9240 f27302fa Iustin Pop
        self.target.AddTag(tag)
9241 5c947f38 Iustin Pop
    except errors.TagError, err:
9242 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
9243 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
9244 5c947f38 Iustin Pop
9245 5c947f38 Iustin Pop
9246 f27302fa Iustin Pop
class LUDelTags(TagsLU):
9247 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
9248 5c947f38 Iustin Pop

9249 5c947f38 Iustin Pop
  """
9250 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
9251 8646adce Guido Trotter
  REQ_BGL = False
9252 5c947f38 Iustin Pop
9253 5c947f38 Iustin Pop
  def CheckPrereq(self):
9254 5c947f38 Iustin Pop
    """Check prerequisites.
9255 5c947f38 Iustin Pop

9256 5c947f38 Iustin Pop
    This checks that we have the given tag.
9257 5c947f38 Iustin Pop

9258 5c947f38 Iustin Pop
    """
9259 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
9260 f27302fa Iustin Pop
    for tag in self.op.tags:
9261 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
9262 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
9263 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
9264 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
9265 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
9266 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
9267 f27302fa Iustin Pop
      diff_names.sort()
9268 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
9269 5c983ee5 Iustin Pop
                                 (",".join(diff_names)), errors.ECODE_NOENT)
9270 5c947f38 Iustin Pop
9271 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
9272 5c947f38 Iustin Pop
    """Remove the tag from the object.
9273 5c947f38 Iustin Pop

9274 5c947f38 Iustin Pop
    """
9275 f27302fa Iustin Pop
    for tag in self.op.tags:
9276 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
9277 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
9278 06009e27 Iustin Pop
9279 0eed6e61 Guido Trotter
9280 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
9281 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
9282 06009e27 Iustin Pop

9283 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
9284 06009e27 Iustin Pop
  time.
9285 06009e27 Iustin Pop

9286 06009e27 Iustin Pop
  """
9287 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
9288 fbe9022f Guido Trotter
  REQ_BGL = False
9289 06009e27 Iustin Pop
9290 fbe9022f Guido Trotter
  def ExpandNames(self):
9291 fbe9022f Guido Trotter
    """Expand names and set required locks.
9292 06009e27 Iustin Pop

9293 fbe9022f Guido Trotter
    This expands the node list, if any.
9294 06009e27 Iustin Pop

9295 06009e27 Iustin Pop
    """
9296 fbe9022f Guido Trotter
    self.needed_locks = {}
9297 06009e27 Iustin Pop
    if self.op.on_nodes:
9298 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
9299 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
9300 fbe9022f Guido Trotter
      # more information.
9301 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
9302 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
9303 fbe9022f Guido Trotter
9304 fbe9022f Guido Trotter
  def CheckPrereq(self):
9305 fbe9022f Guido Trotter
    """Check prerequisites.
9306 fbe9022f Guido Trotter

9307 fbe9022f Guido Trotter
    """
9308 06009e27 Iustin Pop
9309 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
9310 06009e27 Iustin Pop
    """Do the actual sleep.
9311 06009e27 Iustin Pop

9312 06009e27 Iustin Pop
    """
9313 06009e27 Iustin Pop
    if self.op.on_master:
9314 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
9315 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
9316 06009e27 Iustin Pop
    if self.op.on_nodes:
9317 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
9318 06009e27 Iustin Pop
      for node, node_result in result.items():
9319 4c4e4e1e Iustin Pop
        node_result.Raise("Failure during rpc call to node %s" % node)
9320 d61df03e Iustin Pop
9321 d61df03e Iustin Pop
9322 d1c2dd75 Iustin Pop
class IAllocator(object):
9323 d1c2dd75 Iustin Pop
  """IAllocator framework.
9324 d61df03e Iustin Pop

9325 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
9326 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
9327 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
9328 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
9329 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
9330 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
9331 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, result) for
9332 d1c2dd75 Iustin Pop
      easy usage
9333 d61df03e Iustin Pop

9334 d61df03e Iustin Pop
  """
9335 7260cfbe Iustin Pop
  # pylint: disable-msg=R0902
9336 7260cfbe Iustin Pop
  # lots of instance attributes
9337 29859cb7 Iustin Pop
  _ALLO_KEYS = [
9338 8d3f86a0 Iustin Pop
    "name", "mem_size", "disks", "disk_template",
9339 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
9340 d1c2dd75 Iustin Pop
    ]
9341 29859cb7 Iustin Pop
  _RELO_KEYS = [
9342 8d3f86a0 Iustin Pop
    "name", "relocate_from",
9343 29859cb7 Iustin Pop
    ]
9344 7f60a422 Iustin Pop
  _EVAC_KEYS = [
9345 7f60a422 Iustin Pop
    "evac_nodes",
9346 7f60a422 Iustin Pop
    ]
9347 d1c2dd75 Iustin Pop
9348 8d3f86a0 Iustin Pop
  def __init__(self, cfg, rpc, mode, **kwargs):
9349 923ddac0 Michael Hanselmann
    self.cfg = cfg
9350 923ddac0 Michael Hanselmann
    self.rpc = rpc
9351 d1c2dd75 Iustin Pop
    # init buffer variables
9352 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
9353 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
9354 29859cb7 Iustin Pop
    self.mode = mode
9355 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
9356 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
9357 a0add446 Iustin Pop
    self.hypervisor = None
9358 29859cb7 Iustin Pop
    self.relocate_from = None
9359 8d3f86a0 Iustin Pop
    self.name = None
9360 7f60a422 Iustin Pop
    self.evac_nodes = None
9361 27579978 Iustin Pop
    # computed fields
9362 27579978 Iustin Pop
    self.required_nodes = None
9363 d1c2dd75 Iustin Pop
    # init result fields
9364 680f0a89 Iustin Pop
    self.success = self.info = self.result = None
9365 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
9366 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
9367 9757cc90 Iustin Pop
      fn = self._AddNewInstance
9368 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
9369 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
9370 9757cc90 Iustin Pop
      fn = self._AddRelocateInstance
9371 7f60a422 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
9372 7f60a422 Iustin Pop
      keyset = self._EVAC_KEYS
9373 7f60a422 Iustin Pop
      fn = self._AddEvacuateNodes
9374 29859cb7 Iustin Pop
    else:
9375 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
9376 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
9377 d1c2dd75 Iustin Pop
    for key in kwargs:
9378 29859cb7 Iustin Pop
      if key not in keyset:
9379 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
9380 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
9381 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
9382 7f60a422 Iustin Pop
9383 29859cb7 Iustin Pop
    for key in keyset:
9384 d1c2dd75 Iustin Pop
      if key not in kwargs:
9385 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
9386 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
9387 9757cc90 Iustin Pop
    self._BuildInputData(fn)
9388 d1c2dd75 Iustin Pop
9389 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
9390 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
9391 d1c2dd75 Iustin Pop

9392 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
9393 d1c2dd75 Iustin Pop

9394 d1c2dd75 Iustin Pop
    """
9395 923ddac0 Michael Hanselmann
    cfg = self.cfg
9396 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
9397 d1c2dd75 Iustin Pop
    # cluster data
9398 d1c2dd75 Iustin Pop
    data = {
9399 77031881 Iustin Pop
      "version": constants.IALLOCATOR_VERSION,
9400 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
9401 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
9402 1325da74 Iustin Pop
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
9403 d1c2dd75 Iustin Pop
      # we don't have job IDs
9404 d61df03e Iustin Pop
      }
9405 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
9406 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
9407 6286519f Iustin Pop
9408 d1c2dd75 Iustin Pop
    # node data
9409 d1c2dd75 Iustin Pop
    node_results = {}
9410 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
9411 8cc7e742 Guido Trotter
9412 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
9413 a0add446 Iustin Pop
      hypervisor_name = self.hypervisor
9414 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
9415 a0add446 Iustin Pop
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
9416 7f60a422 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_MEVAC:
9417 7f60a422 Iustin Pop
      hypervisor_name = cluster_info.enabled_hypervisors[0]
9418 8cc7e742 Guido Trotter
9419 923ddac0 Michael Hanselmann
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
9420 923ddac0 Michael Hanselmann
                                        hypervisor_name)
9421 923ddac0 Michael Hanselmann
    node_iinfo = \
9422 923ddac0 Michael Hanselmann
      self.rpc.call_all_instances_info(node_list,
9423 923ddac0 Michael Hanselmann
                                       cluster_info.enabled_hypervisors)
9424 1325da74 Iustin Pop
    for nname, nresult in node_data.items():
9425 1325da74 Iustin Pop
      # first fill in static (config-based) values
9426 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
9427 d1c2dd75 Iustin Pop
      pnr = {
9428 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
9429 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
9430 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
9431 fc0fe88c Iustin Pop
        "offline": ninfo.offline,
9432 0b2454b9 Iustin Pop
        "drained": ninfo.drained,
9433 1325da74 Iustin Pop
        "master_candidate": ninfo.master_candidate,
9434 d1c2dd75 Iustin Pop
        }
9435 1325da74 Iustin Pop
9436 0d853843 Iustin Pop
      if not (ninfo.offline or ninfo.drained):
9437 4c4e4e1e Iustin Pop
        nresult.Raise("Can't get data for node %s" % nname)
9438 4c4e4e1e Iustin Pop
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
9439 4c4e4e1e Iustin Pop
                                nname)
9440 070e998b Iustin Pop
        remote_info = nresult.payload
9441 b142ef15 Iustin Pop
9442 1325da74 Iustin Pop
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
9443 1325da74 Iustin Pop
                     'vg_size', 'vg_free', 'cpu_total']:
9444 1325da74 Iustin Pop
          if attr not in remote_info:
9445 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' didn't return attribute"
9446 1325da74 Iustin Pop
                                     " '%s'" % (nname, attr))
9447 070e998b Iustin Pop
          if not isinstance(remote_info[attr], int):
9448 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' returned invalid value"
9449 070e998b Iustin Pop
                                     " for '%s': %s" %
9450 070e998b Iustin Pop
                                     (nname, attr, remote_info[attr]))
9451 1325da74 Iustin Pop
        # compute memory used by primary instances
9452 1325da74 Iustin Pop
        i_p_mem = i_p_up_mem = 0
9453 1325da74 Iustin Pop
        for iinfo, beinfo in i_list:
9454 1325da74 Iustin Pop
          if iinfo.primary_node == nname:
9455 1325da74 Iustin Pop
            i_p_mem += beinfo[constants.BE_MEMORY]
9456 2fa74ef4 Iustin Pop
            if iinfo.name not in node_iinfo[nname].payload:
9457 1325da74 Iustin Pop
              i_used_mem = 0
9458 1325da74 Iustin Pop
            else:
9459 2fa74ef4 Iustin Pop
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
9460 1325da74 Iustin Pop
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
9461 1325da74 Iustin Pop
            remote_info['memory_free'] -= max(0, i_mem_diff)
9462 1325da74 Iustin Pop
9463 1325da74 Iustin Pop
            if iinfo.admin_up:
9464 1325da74 Iustin Pop
              i_p_up_mem += beinfo[constants.BE_MEMORY]
9465 1325da74 Iustin Pop
9466 1325da74 Iustin Pop
        # compute memory used by instances
9467 1325da74 Iustin Pop
        pnr_dyn = {
9468 1325da74 Iustin Pop
          "total_memory": remote_info['memory_total'],
9469 1325da74 Iustin Pop
          "reserved_memory": remote_info['memory_dom0'],
9470 1325da74 Iustin Pop
          "free_memory": remote_info['memory_free'],
9471 1325da74 Iustin Pop
          "total_disk": remote_info['vg_size'],
9472 1325da74 Iustin Pop
          "free_disk": remote_info['vg_free'],
9473 1325da74 Iustin Pop
          "total_cpus": remote_info['cpu_total'],
9474 1325da74 Iustin Pop
          "i_pri_memory": i_p_mem,
9475 1325da74 Iustin Pop
          "i_pri_up_memory": i_p_up_mem,
9476 1325da74 Iustin Pop
          }
9477 1325da74 Iustin Pop
        pnr.update(pnr_dyn)
9478 1325da74 Iustin Pop
9479 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
9480 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
9481 d1c2dd75 Iustin Pop
9482 d1c2dd75 Iustin Pop
    # instance data
9483 d1c2dd75 Iustin Pop
    instance_data = {}
9484 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
9485 a9fe7e8f Guido Trotter
      nic_data = []
9486 a9fe7e8f Guido Trotter
      for nic in iinfo.nics:
9487 a9fe7e8f Guido Trotter
        filled_params = objects.FillDict(
9488 a9fe7e8f Guido Trotter
            cluster_info.nicparams[constants.PP_DEFAULT],
9489 a9fe7e8f Guido Trotter
            nic.nicparams)
9490 a9fe7e8f Guido Trotter
        nic_dict = {"mac": nic.mac,
9491 a9fe7e8f Guido Trotter
                    "ip": nic.ip,
9492 a9fe7e8f Guido Trotter
                    "mode": filled_params[constants.NIC_MODE],
9493 a9fe7e8f Guido Trotter
                    "link": filled_params[constants.NIC_LINK],
9494 a9fe7e8f Guido Trotter
                   }
9495 a9fe7e8f Guido Trotter
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
9496 a9fe7e8f Guido Trotter
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
9497 a9fe7e8f Guido Trotter
        nic_data.append(nic_dict)
9498 d1c2dd75 Iustin Pop
      pir = {
9499 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
9500 1325da74 Iustin Pop
        "admin_up": iinfo.admin_up,
9501 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
9502 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
9503 d1c2dd75 Iustin Pop
        "os": iinfo.os,
9504 1325da74 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
9505 d1c2dd75 Iustin Pop
        "nics": nic_data,
9506 1325da74 Iustin Pop
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
9507 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
9508 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
9509 d1c2dd75 Iustin Pop
        }
9510 88ae4f85 Iustin Pop
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
9511 88ae4f85 Iustin Pop
                                                 pir["disks"])
9512 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
9513 d61df03e Iustin Pop
9514 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
9515 d61df03e Iustin Pop
9516 d1c2dd75 Iustin Pop
    self.in_data = data
9517 d61df03e Iustin Pop
9518 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
9519 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
9520 d61df03e Iustin Pop

9521 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
9522 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
9523 d61df03e Iustin Pop

9524 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
9525 d1c2dd75 Iustin Pop
    done.
9526 d61df03e Iustin Pop

9527 d1c2dd75 Iustin Pop
    """
9528 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
9529 d1c2dd75 Iustin Pop
9530 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
9531 27579978 Iustin Pop
      self.required_nodes = 2
9532 27579978 Iustin Pop
    else:
9533 27579978 Iustin Pop
      self.required_nodes = 1
9534 d1c2dd75 Iustin Pop
    request = {
9535 d1c2dd75 Iustin Pop
      "name": self.name,
9536 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
9537 d1c2dd75 Iustin Pop
      "tags": self.tags,
9538 d1c2dd75 Iustin Pop
      "os": self.os,
9539 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
9540 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
9541 d1c2dd75 Iustin Pop
      "disks": self.disks,
9542 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
9543 d1c2dd75 Iustin Pop
      "nics": self.nics,
9544 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
9545 d1c2dd75 Iustin Pop
      }
9546 9757cc90 Iustin Pop
    return request
9547 298fe380 Iustin Pop
9548 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
9549 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
9550 298fe380 Iustin Pop

9551 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
9552 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
9553 d61df03e Iustin Pop

9554 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
9555 d1c2dd75 Iustin Pop
    done.
9556 d61df03e Iustin Pop

9557 d1c2dd75 Iustin Pop
    """
9558 923ddac0 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(self.name)
9559 27579978 Iustin Pop
    if instance is None:
9560 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
9561 27579978 Iustin Pop
                                   " IAllocator" % self.name)
9562 27579978 Iustin Pop
9563 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
9564 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
9565 5c983ee5 Iustin Pop
                                 errors.ECODE_INVAL)
9566 27579978 Iustin Pop
9567 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
9568 5c983ee5 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
9569 5c983ee5 Iustin Pop
                                 errors.ECODE_STATE)
9570 2a139bb0 Iustin Pop
9571 27579978 Iustin Pop
    self.required_nodes = 1
9572 dafc7302 Guido Trotter
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
9573 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
9574 27579978 Iustin Pop
9575 d1c2dd75 Iustin Pop
    request = {
9576 d1c2dd75 Iustin Pop
      "name": self.name,
9577 27579978 Iustin Pop
      "disk_space_total": disk_space,
9578 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
9579 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
9580 d1c2dd75 Iustin Pop
      }
9581 9757cc90 Iustin Pop
    return request
9582 d61df03e Iustin Pop
9583 7f60a422 Iustin Pop
  def _AddEvacuateNodes(self):
9584 7f60a422 Iustin Pop
    """Add evacuate nodes data to allocator structure.
9585 7f60a422 Iustin Pop

9586 7f60a422 Iustin Pop
    """
9587 7f60a422 Iustin Pop
    request = {
9588 7f60a422 Iustin Pop
      "evac_nodes": self.evac_nodes
9589 7f60a422 Iustin Pop
      }
9590 7f60a422 Iustin Pop
    return request
9591 7f60a422 Iustin Pop
9592 9757cc90 Iustin Pop
  def _BuildInputData(self, fn):
9593 d1c2dd75 Iustin Pop
    """Build input data structures.
9594 d61df03e Iustin Pop

9595 d1c2dd75 Iustin Pop
    """
9596 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
9597 d61df03e Iustin Pop
9598 9757cc90 Iustin Pop
    request = fn()
9599 9757cc90 Iustin Pop
    request["type"] = self.mode
9600 9757cc90 Iustin Pop
    self.in_data["request"] = request
9601 d61df03e Iustin Pop
9602 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
9603 d61df03e Iustin Pop
9604 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
9605 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
9606 298fe380 Iustin Pop

9607 d1c2dd75 Iustin Pop
    """
9608 72737a7f Iustin Pop
    if call_fn is None:
9609 923ddac0 Michael Hanselmann
      call_fn = self.rpc.call_iallocator_runner
9610 298fe380 Iustin Pop
9611 923ddac0 Michael Hanselmann
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
9612 4c4e4e1e Iustin Pop
    result.Raise("Failure while running the iallocator script")
9613 8d528b7c Iustin Pop
9614 87f5c298 Iustin Pop
    self.out_text = result.payload
9615 d1c2dd75 Iustin Pop
    if validate:
9616 d1c2dd75 Iustin Pop
      self._ValidateResult()
9617 298fe380 Iustin Pop
9618 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
9619 d1c2dd75 Iustin Pop
    """Process the allocator results.
9620 538475ca Iustin Pop

9621 d1c2dd75 Iustin Pop
    This will parse the output and, if successful, save the result in
9622 d1c2dd75 Iustin Pop
    self.out_data and the other result attributes.
9623 538475ca Iustin Pop

9624 d1c2dd75 Iustin Pop
    """
9625 d1c2dd75 Iustin Pop
    try:
9626 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
9627 d1c2dd75 Iustin Pop
    except Exception, err:
9628 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
9629 d1c2dd75 Iustin Pop
9630 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
9631 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
9632 538475ca Iustin Pop
9633 680f0a89 Iustin Pop
    # TODO: remove backwards compatibility in later versions
9634 680f0a89 Iustin Pop
    if "nodes" in rdict and "result" not in rdict:
9635 680f0a89 Iustin Pop
      rdict["result"] = rdict["nodes"]
9636 680f0a89 Iustin Pop
      del rdict["nodes"]
9637 680f0a89 Iustin Pop
9638 680f0a89 Iustin Pop
    for key in "success", "info", "result":
9639 d1c2dd75 Iustin Pop
      if key not in rdict:
9640 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
9641 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
9642 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
9643 538475ca Iustin Pop
9644 680f0a89 Iustin Pop
    if not isinstance(rdict["result"], list):
9645 680f0a89 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'result' key"
9646 d1c2dd75 Iustin Pop
                               " is not a list")
9647 d1c2dd75 Iustin Pop
    self.out_data = rdict
9648 538475ca Iustin Pop
9649 538475ca Iustin Pop
9650 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
9651 d61df03e Iustin Pop
  """Run allocator tests.
9652 d61df03e Iustin Pop

9653 d61df03e Iustin Pop
  This LU runs the allocator tests.
9654 d61df03e Iustin Pop

9655 d61df03e Iustin Pop
  """
9656 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
9657 d61df03e Iustin Pop
9658 d61df03e Iustin Pop
  def CheckPrereq(self):
9659 d61df03e Iustin Pop
    """Check prerequisites.
9660 d61df03e Iustin Pop

9661 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode.
9662 d61df03e Iustin Pop

9663 d61df03e Iustin Pop
    """
9664 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
9665 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
9666 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
9667 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
9668 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
9669 5c983ee5 Iustin Pop
                                     attr, errors.ECODE_INVAL)
9670 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
9671 d61df03e Iustin Pop
      if iname is not None:
9672 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
9673 5c983ee5 Iustin Pop
                                   iname, errors.ECODE_EXISTS)
9674 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
9675 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'",
9676 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
9677 d61df03e Iustin Pop
      for row in self.op.nics:
9678 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
9679 d61df03e Iustin Pop
            "mac" not in row or
9680 d61df03e Iustin Pop
            "ip" not in row or
9681 d61df03e Iustin Pop
            "bridge" not in row):
9682 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the 'nics'"
9683 5c983ee5 Iustin Pop
                                     " parameter", errors.ECODE_INVAL)
9684 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
9685 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'",
9686 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
9687 d61df03e Iustin Pop
      for row in self.op.disks:
9688 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
9689 d61df03e Iustin Pop
            "size" not in row or
9690 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
9691 d61df03e Iustin Pop
            "mode" not in row or
9692 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
9693 5c983ee5 Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the 'disks'"
9694 5c983ee5 Iustin Pop
                                     " parameter", errors.ECODE_INVAL)
9695 8901997e Iustin Pop
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
9696 8cc7e742 Guido Trotter
        self.op.hypervisor = self.cfg.GetHypervisorType()
9697 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
9698 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
9699 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input",
9700 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
9701 cf26a87a Iustin Pop
      fname = _ExpandInstanceName(self.cfg, self.op.name)
9702 d61df03e Iustin Pop
      self.op.name = fname
9703 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
9704 823a72bc Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
9705 823a72bc Iustin Pop
      if not hasattr(self.op, "evac_nodes"):
9706 823a72bc Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'evac_nodes' on"
9707 823a72bc Iustin Pop
                                   " opcode input", errors.ECODE_INVAL)
9708 d61df03e Iustin Pop
    else:
9709 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
9710 5c983ee5 Iustin Pop
                                 self.op.mode, errors.ECODE_INVAL)
9711 d61df03e Iustin Pop
9712 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
9713 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
9714 5c983ee5 Iustin Pop
        raise errors.OpPrereqError("Missing allocator name",
9715 5c983ee5 Iustin Pop
                                   errors.ECODE_INVAL)
9716 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
9717 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
9718 5c983ee5 Iustin Pop
                                 self.op.direction, errors.ECODE_INVAL)
9719 d61df03e Iustin Pop
9720 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
9721 d61df03e Iustin Pop
    """Run the allocator test.
9722 d61df03e Iustin Pop

9723 d61df03e Iustin Pop
    """
9724 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
9725 923ddac0 Michael Hanselmann
      ial = IAllocator(self.cfg, self.rpc,
9726 29859cb7 Iustin Pop
                       mode=self.op.mode,
9727 29859cb7 Iustin Pop
                       name=self.op.name,
9728 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
9729 29859cb7 Iustin Pop
                       disks=self.op.disks,
9730 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
9731 29859cb7 Iustin Pop
                       os=self.op.os,
9732 29859cb7 Iustin Pop
                       tags=self.op.tags,
9733 29859cb7 Iustin Pop
                       nics=self.op.nics,
9734 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
9735 8cc7e742 Guido Trotter
                       hypervisor=self.op.hypervisor,
9736 29859cb7 Iustin Pop
                       )
9737 823a72bc Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
9738 923ddac0 Michael Hanselmann
      ial = IAllocator(self.cfg, self.rpc,
9739 29859cb7 Iustin Pop
                       mode=self.op.mode,
9740 29859cb7 Iustin Pop
                       name=self.op.name,
9741 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
9742 29859cb7 Iustin Pop
                       )
9743 823a72bc Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_MEVAC:
9744 823a72bc Iustin Pop
      ial = IAllocator(self.cfg, self.rpc,
9745 823a72bc Iustin Pop
                       mode=self.op.mode,
9746 823a72bc Iustin Pop
                       evac_nodes=self.op.evac_nodes)
9747 823a72bc Iustin Pop
    else:
9748 823a72bc Iustin Pop
      raise errors.ProgrammerError("Uncatched mode %s in"
9749 823a72bc Iustin Pop
                                   " LUTestAllocator.Exec", self.op.mode)
9750 d61df03e Iustin Pop
9751 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
9752 d1c2dd75 Iustin Pop
      result = ial.in_text
9753 298fe380 Iustin Pop
    else:
9754 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
9755 d1c2dd75 Iustin Pop
      result = ial.out_text
9756 298fe380 Iustin Pop
    return result