Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 159d4ec6

History | View | Annotate | Download (296 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import time
29 a8083063 Iustin Pop
import re
30 a8083063 Iustin Pop
import platform
31 ffa1c0dc Iustin Pop
import logging
32 74409b12 Iustin Pop
import copy
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import ssh
35 a8083063 Iustin Pop
from ganeti import utils
36 a8083063 Iustin Pop
from ganeti import errors
37 a8083063 Iustin Pop
from ganeti import hypervisor
38 6048c986 Guido Trotter
from ganeti import locking
39 a8083063 Iustin Pop
from ganeti import constants
40 a8083063 Iustin Pop
from ganeti import objects
41 8d14b30d Iustin Pop
from ganeti import serializer
42 112f18a5 Iustin Pop
from ganeti import ssconf
43 d61df03e Iustin Pop
44 d61df03e Iustin Pop
45 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq (except when tasklets are used)
    - implement Exec (except when tasklets are used)
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  # HPATH/HTYPE identify the hook directory and object type for the hooks
  # runner; a None HPATH means BuildHooksEnv will never be called.
  HPATH = None
  HTYPE = None
  # Names of opcode attributes which must be present (non-None); checked in
  # the constructor below.
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    @param processor: the mcpu processor executing this LU; also provides
        the Log* feedback callbacks bound below
    @param op: the opcode this LU should carry out
    @param context: execution context, from which the cluster config is taken
    @param rpc: RPC runner used to talk to the nodes

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # SshRunner created lazily by __GetSSH (exposed via the "ssh" property)
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo
    self.LogStep = processor.LogStep
    # support for dry-run
    self.dry_run_result = None

    # Tasklets
    self.tasklets = None

    # Validate that all attributes declared in _OP_REQP are present on the
    # opcode and not None; missing parameters abort before any work is done
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    # Created on first access and cached for the LU's lifetime
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possible
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, ecc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    This function can also define a list of tasklets, which then will be
    executed in order instead of the usual LU-level CheckPrereq and Exec
    functions, if those are not defined by the LU.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    # When tasklets are used, the prerequisite check is delegated to each
    # tasklet in order; otherwise the LU must override this method.
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Checking prerequisites for tasklet %s/%s",
                      idx + 1, len(self.tasklets))
        tl.CheckPrereq()
    else:
      raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    @param feedback_fn: callable used to send feedback to the caller

    """
    # Same delegation scheme as CheckPrereq: tasklets run in order, or the
    # LU must provide its own implementation.
    if self.tasklets is not None:
      for (idx, tl) in enumerate(self.tasklets):
        logging.debug("Executing tasklet %s/%s", idx + 1, len(self.tasklets))
        tl.Exec(feedback_fn)
    else:
      raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    If should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    # LOCKS_REPLACE overwrites the node lock list, LOCKS_APPEND extends it
    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # Consume the recalculation request so a second call without a new
    # request trips the assertion above
    del self.recalculate_locks[locking.LEVEL_NODE]
349 a8083063 Iustin Pop
350 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Convenience base class for logical units without hooks.

  Deriving from this class instead of L{LogicalUnit} saves hook-less
  LUs from having to repeat the attributes that disable hook
  processing, thereby reducing duplicate code.

  """
  HPATH = None
  HTYPE = None
360 a8083063 Iustin Pop
361 9a6800e1 Michael Hanselmann
class Tasklet:
  """Base class for tasklets.

  A tasklet is a subcomponent of a logical unit: an LU can be built
  entirely out of tasklets, or mix legacy code with them. All locking
  must be handled by the owning LU -- tasklets are completely
  lock-agnostic.

  Subclasses must follow these rules:
    - Implement CheckPrereq
    - Implement Exec

  """
  def __init__(self, lu):
    """Remember the owning LU and shortcuts to its facilities.

    @param lu: the logical unit this tasklet belongs to

    """
    self.lu = lu
    # Convenience aliases for the LU's config and RPC runner
    self.cfg = lu.cfg
    self.rpc = lu.rpc

  def CheckPrereq(self):
    """Check the prerequisites for this tasklet.

    Verifies that the tasklet can be executed. Internode communication
    is allowed, but the check must be idempotent -- no cluster or
    system changes may happen here.

    Should raise errors.OpPrereqError when a prerequisite is not met;
    the return value is ignored. Parameters should also be brought to
    their canonical form here if that hasn't happened yet.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the tasklet.

    Performs the actual work. Failures that are dealt with in code, or
    expected, should be signalled via errors.OpExecError.

    @param feedback_fn: callable used to send feedback to the caller

    """
    raise NotImplementedError
405 9a6800e1 Michael Hanselmann
406 9a6800e1 Michael Hanselmann
407 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Return the checked and expanded list of node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of (possibly short) node names; must be non-empty
  @rtype: list
  @return: the expanded node names, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type or
      contains an unknown node

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  # "All nodes" semantics must be handled by the caller; this helper
  # refuses empty input outright.
  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  expanded = []
  for short_name in nodes:
    full_name = lu.cfg.ExpandNodeName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % short_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
434 3312b702 Iustin Pop
435 3312b702 Iustin Pop
436 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Return the checked and expanded list of instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the expanded instance names (sorted when all instances are
      selected)
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # An empty list selects every instance in the cluster
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for short_name in instances:
    full_name = lu.cfg.ExpandInstanceName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % short_name)
    wanted.append(full_name)
  return wanted
464 dcb93971 Michael Hanselmann
465 dcb93971 Michael Hanselmann
466 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Verify that all requested output fields are known.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @param selected: the user-requested output fields
  @raise errors.OpPrereqError: if any selected field is unknown

  """
  # Combine the static and dynamic field sets into one validator
  valid_fields = utils.FieldSet()
  valid_fields.Extend(static)
  valid_fields.Extend(dynamic)

  unknown = valid_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
483 dcb93971 Michael Hanselmann
484 dcb93971 Michael Hanselmann
485 a5961235 Iustin Pop
def _CheckBooleanOpField(op, name):
486 a5961235 Iustin Pop
  """Validates boolean opcode parameters.
487 a5961235 Iustin Pop

488 a5961235 Iustin Pop
  This will ensure that an opcode parameter is either a boolean value,
489 a5961235 Iustin Pop
  or None (but that it always exists).
490 a5961235 Iustin Pop

491 a5961235 Iustin Pop
  """
492 a5961235 Iustin Pop
  val = getattr(op, name, None)
493 a5961235 Iustin Pop
  if not (val is None or isinstance(val, bool)):
494 a5961235 Iustin Pop
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
495 a5961235 Iustin Pop
                               (name, str(val)))
496 a5961235 Iustin Pop
  setattr(op, name, val)
497 a5961235 Iustin Pop
498 a5961235 Iustin Pop
499 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
500 a5961235 Iustin Pop
  """Ensure that a given node is online.
501 a5961235 Iustin Pop

502 a5961235 Iustin Pop
  @param lu: the LU on behalf of which we make the check
503 a5961235 Iustin Pop
  @param node: the node to check
504 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is offline
505 a5961235 Iustin Pop

506 a5961235 Iustin Pop
  """
507 a5961235 Iustin Pop
  if lu.cfg.GetNodeInfo(node).offline:
508 a5961235 Iustin Pop
    raise errors.OpPrereqError("Can't use offline node %s" % node)
509 a5961235 Iustin Pop
510 a5961235 Iustin Pop
511 733a2b6a Iustin Pop
def _CheckNodeNotDrained(lu, node):
512 733a2b6a Iustin Pop
  """Ensure that a given node is not drained.
513 733a2b6a Iustin Pop

514 733a2b6a Iustin Pop
  @param lu: the LU on behalf of which we make the check
515 733a2b6a Iustin Pop
  @param node: the node to check
516 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is drained
517 733a2b6a Iustin Pop

518 733a2b6a Iustin Pop
  """
519 733a2b6a Iustin Pop
  if lu.cfg.GetNodeInfo(node).drained:
520 733a2b6a Iustin Pop
    raise errors.OpPrereqError("Can't use drained node %s" % node)
521 733a2b6a Iustin Pop
522 733a2b6a Iustin Pop
523 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
524 67fc3042 Iustin Pop
                          memory, vcpus, nics, disk_template, disks,
525 7c4d6c7b Michael Hanselmann
                          bep, hvp, hypervisor_name):
526 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
527 e4376078 Iustin Pop

528 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
529 e4376078 Iustin Pop

530 e4376078 Iustin Pop
  @type name: string
531 e4376078 Iustin Pop
  @param name: the name of the instance
532 e4376078 Iustin Pop
  @type primary_node: string
533 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
534 e4376078 Iustin Pop
  @type secondary_nodes: list
535 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
536 e4376078 Iustin Pop
  @type os_type: string
537 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
538 0d68c45d Iustin Pop
  @type status: boolean
539 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
540 e4376078 Iustin Pop
  @type memory: string
541 e4376078 Iustin Pop
  @param memory: the memory size of the instance
542 e4376078 Iustin Pop
  @type vcpus: string
543 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
544 e4376078 Iustin Pop
  @type nics: list
545 5e3d3eb3 Guido Trotter
  @param nics: list of tuples (ip, mac, mode, link) representing
546 5e3d3eb3 Guido Trotter
      the NICs the instance has
547 2c2690c9 Iustin Pop
  @type disk_template: string
548 5bbd3f7f Michael Hanselmann
  @param disk_template: the disk template of the instance
549 2c2690c9 Iustin Pop
  @type disks: list
550 2c2690c9 Iustin Pop
  @param disks: the list of (size, mode) pairs
551 67fc3042 Iustin Pop
  @type bep: dict
552 67fc3042 Iustin Pop
  @param bep: the backend parameters for the instance
553 67fc3042 Iustin Pop
  @type hvp: dict
554 67fc3042 Iustin Pop
  @param hvp: the hypervisor parameters for the instance
555 7c4d6c7b Michael Hanselmann
  @type hypervisor_name: string
556 7c4d6c7b Michael Hanselmann
  @param hypervisor_name: the hypervisor for the instance
557 e4376078 Iustin Pop
  @rtype: dict
558 e4376078 Iustin Pop
  @return: the hook environment for this instance
559 ecb215b5 Michael Hanselmann

560 396e1b78 Michael Hanselmann
  """
561 0d68c45d Iustin Pop
  if status:
562 0d68c45d Iustin Pop
    str_status = "up"
563 0d68c45d Iustin Pop
  else:
564 0d68c45d Iustin Pop
    str_status = "down"
565 396e1b78 Michael Hanselmann
  env = {
566 0e137c28 Iustin Pop
    "OP_TARGET": name,
567 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
568 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
569 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
570 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
571 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
572 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
573 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
574 2c2690c9 Iustin Pop
    "INSTANCE_DISK_TEMPLATE": disk_template,
575 7c4d6c7b Michael Hanselmann
    "INSTANCE_HYPERVISOR": hypervisor_name,
576 396e1b78 Michael Hanselmann
  }
577 396e1b78 Michael Hanselmann
578 396e1b78 Michael Hanselmann
  if nics:
579 396e1b78 Michael Hanselmann
    nic_count = len(nics)
580 62f0dd02 Guido Trotter
    for idx, (ip, mac, mode, link) in enumerate(nics):
581 396e1b78 Michael Hanselmann
      if ip is None:
582 396e1b78 Michael Hanselmann
        ip = ""
583 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
584 2c2690c9 Iustin Pop
      env["INSTANCE_NIC%d_MAC" % idx] = mac
585 62f0dd02 Guido Trotter
      env["INSTANCE_NIC%d_MODE" % idx] = mode
586 62f0dd02 Guido Trotter
      env["INSTANCE_NIC%d_LINK" % idx] = link
587 62f0dd02 Guido Trotter
      if mode == constants.NIC_MODE_BRIDGED:
588 62f0dd02 Guido Trotter
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
589 396e1b78 Michael Hanselmann
  else:
590 396e1b78 Michael Hanselmann
    nic_count = 0
591 396e1b78 Michael Hanselmann
592 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
593 396e1b78 Michael Hanselmann
594 2c2690c9 Iustin Pop
  if disks:
595 2c2690c9 Iustin Pop
    disk_count = len(disks)
596 2c2690c9 Iustin Pop
    for idx, (size, mode) in enumerate(disks):
597 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_SIZE" % idx] = size
598 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_MODE" % idx] = mode
599 2c2690c9 Iustin Pop
  else:
600 2c2690c9 Iustin Pop
    disk_count = 0
601 2c2690c9 Iustin Pop
602 2c2690c9 Iustin Pop
  env["INSTANCE_DISK_COUNT"] = disk_count
603 2c2690c9 Iustin Pop
604 67fc3042 Iustin Pop
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
605 67fc3042 Iustin Pop
    for key, value in source.items():
606 67fc3042 Iustin Pop
      env["INSTANCE_%s_%s" % (kind, key)] = value
607 67fc3042 Iustin Pop
608 396e1b78 Michael Hanselmann
  return env
609 396e1b78 Michael Hanselmann
610 96acbc09 Michael Hanselmann
611 f9b10246 Guido Trotter
def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  cluster_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  result = []
  for nic in nics:
    # per-NIC parameters override the cluster-level defaults
    params = objects.FillDict(cluster_nicparams, nic.nicparams)
    result.append((nic.ip, nic.mac,
                   params[constants.NIC_MODE], params[constants.NIC_LINK]))
  return result
634 96acbc09 Michael Hanselmann
635 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  be_params = cluster.FillBE(instance)
  hv_params = cluster.FillHV(instance)
  # disk list as (size, mode) pairs, as _BuildInstanceHookEnv expects
  disk_pairs = [(disk.size, disk.mode) for disk in instance.disks]
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': be_params[constants.BE_MEMORY],
    'vcpus': be_params[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': disk_pairs,
    'bep': be_params,
    'hvp': hv_params,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
672 396e1b78 Michael Hanselmann
673 44485f49 Guido Trotter
def _AdjustCandidatePool(lu, exceptions):
674 ec0292f1 Iustin Pop
  """Adjust the candidate pool after node operations.
675 ec0292f1 Iustin Pop

676 ec0292f1 Iustin Pop
  """
677 44485f49 Guido Trotter
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
678 ec0292f1 Iustin Pop
  if mod_list:
679 ec0292f1 Iustin Pop
    lu.LogInfo("Promoted nodes to master candidate role: %s",
680 ee513a66 Iustin Pop
               ", ".join(node.name for node in mod_list))
681 ec0292f1 Iustin Pop
    for name in mod_list:
682 ec0292f1 Iustin Pop
      lu.context.ReaddNode(name)
683 44485f49 Guido Trotter
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
684 ec0292f1 Iustin Pop
  if mc_now > mc_max:
685 ec0292f1 Iustin Pop
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
686 ec0292f1 Iustin Pop
               (mc_now, mc_max))
687 ec0292f1 Iustin Pop
688 ec0292f1 Iustin Pop
689 6d7e1f20 Guido Trotter
def _DecideSelfPromotion(lu, exceptions=None):
690 6d7e1f20 Guido Trotter
  """Decide whether I should promote myself as a master candidate.
691 6d7e1f20 Guido Trotter

692 6d7e1f20 Guido Trotter
  """
693 6d7e1f20 Guido Trotter
  cp_size = lu.cfg.GetClusterInfo().candidate_pool_size
694 6d7e1f20 Guido Trotter
  mc_now, mc_should, _ = lu.cfg.GetMasterCandidateStats(exceptions)
695 6d7e1f20 Guido Trotter
  # the new node will increase mc_max with one, so:
696 6d7e1f20 Guido Trotter
  mc_should = min(mc_should + 1, cp_size)
697 6d7e1f20 Guido Trotter
  return mc_now < mc_should
698 6d7e1f20 Guido Trotter
699 6d7e1f20 Guido Trotter
700 b165e77e Guido Trotter
def _CheckNicsBridgesExist(lu, target_nics, target_node,
                               profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  @raise errors.OpPrereqError: if any required bridge is missing on
      the target node

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  bridges = []
  for nic in target_nics:
    params = objects.FillDict(c_nicparams, nic.nicparams)
    # only bridged NICs actually need a bridge on the node
    if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      bridges.append(params[constants.NIC_LINK])
  if bridges:
    result = lu.rpc.call_bridges_exist(target_node, bridges)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True)
715 b165e77e Guido Trotter
716 b165e77e Guido Trotter
def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  Defaults to the instance's primary node when no node is given.

  """
  target_node = instance.primary_node if node is None else node
  _CheckNicsBridgesExist(lu, instance.nics, target_node)
724 bf6929a2 Alexander Schreiber
725 c6f1af07 Iustin Pop
def _CheckOSVariant(os_obj, name):
726 f2c05717 Guido Trotter
  """Check whether an OS name conforms to the os variants specification.
727 f2c05717 Guido Trotter

728 c6f1af07 Iustin Pop
  @type os_obj: L{objects.OS}
729 c6f1af07 Iustin Pop
  @param os_obj: OS object to check
730 f2c05717 Guido Trotter
  @type name: string
731 f2c05717 Guido Trotter
  @param name: OS name passed by the user, to check for validity
732 f2c05717 Guido Trotter

733 f2c05717 Guido Trotter
  """
734 c6f1af07 Iustin Pop
  if not os_obj.supported_variants:
735 f2c05717 Guido Trotter
    return
736 f2c05717 Guido Trotter
  try:
737 f2c05717 Guido Trotter
    variant = name.split("+", 1)[1]
738 f2c05717 Guido Trotter
  except IndexError:
739 f2c05717 Guido Trotter
    raise errors.OpPrereqError("OS name must include a variant")
740 f2c05717 Guido Trotter
741 c6f1af07 Iustin Pop
  if variant not in os_obj.supported_variants:
742 f2c05717 Guido Trotter
    raise errors.OpPrereqError("Unsupported OS variant")
743 f2c05717 Guido Trotter
744 f2c05717 Guido Trotter
745 5ba9701d Michael Hanselmann
def _GetNodeInstancesInner(cfg, fn):
746 5ba9701d Michael Hanselmann
  return [i for i in cfg.GetAllInstancesInfo().values() if fn(i)]
747 5ba9701d Michael Hanselmann
748 5ba9701d Michael Hanselmann
749 e9721add Michael Hanselmann
def _GetNodeInstances(cfg, node_name):
  """Returns a list of all primary and secondary instances on a node.

  """
  return _GetNodeInstancesInner(cfg,
                                lambda inst: node_name in inst.all_nodes)
756 e9721add Michael Hanselmann
757 80cb875c Michael Hanselmann
def _GetNodePrimaryInstances(cfg, node_name):
  """Returns primary instances on a node.

  """
  return _GetNodeInstancesInner(
    cfg, lambda inst: inst.primary_node == node_name)
764 80cb875c Michael Hanselmann
765 692738fc Michael Hanselmann
def _GetNodeSecondaryInstances(cfg, node_name):
  """Returns secondary instances on a node.

  """
  return _GetNodeInstancesInner(
    cfg, lambda inst: node_name in inst.secondary_nodes)
772 692738fc Michael Hanselmann
773 efb8da02 Michael Hanselmann
def _GetStorageTypeArgs(cfg, storage_type):
  """Returns the arguments for a storage type.

  """
  if storage_type != constants.ST_FILE:
    return []
  # special case: storage.FileStorage wants a list of storage directories
  return [[cfg.GetFileStorageDir()]]
783 efb8da02 Michael Hanselmann
784 efb8da02 Michael Hanselmann
785 2d9005d8 Michael Hanselmann
def _FindFaultyInstanceDisks(cfg, rpc, instance, node_name, prereq):
786 2d9005d8 Michael Hanselmann
  faulty = []
787 2d9005d8 Michael Hanselmann
788 2d9005d8 Michael Hanselmann
  for dev in instance.disks:
789 2d9005d8 Michael Hanselmann
    cfg.SetDiskID(dev, node_name)
790 2d9005d8 Michael Hanselmann
791 2d9005d8 Michael Hanselmann
  result = rpc.call_blockdev_getmirrorstatus(node_name, instance.disks)
792 2d9005d8 Michael Hanselmann
  result.Raise("Failed to get disk status from node %s" % node_name,
793 2d9005d8 Michael Hanselmann
               prereq=prereq)
794 2d9005d8 Michael Hanselmann
795 2d9005d8 Michael Hanselmann
  for idx, bdev_status in enumerate(result.payload):
796 2d9005d8 Michael Hanselmann
    if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
797 2d9005d8 Michael Hanselmann
      faulty.append(idx)
798 2d9005d8 Michael Hanselmann
799 2d9005d8 Michael Hanselmann
  return faulty
800 2d9005d8 Michael Hanselmann
801 2d9005d8 Michael Hanselmann
802 b5f5fae9 Luca Bigliardi
class LUPostInitCluster(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    The post-init hooks run only on the master node.

    """
    master = self.cfg.GetMasterNode()
    hooks_env = {"OP_TARGET": self.cfg.GetClusterName()}
    return hooks_env, [], [master]

  def CheckPrereq(self):
    """No prerequisites to check.

    """
    return True

  def Exec(self, feedback_fn):
    """Nothing to do.

    """
    return True
830 b5f5fae9 Luca Bigliardi
831 b2c750a4 Luca Bigliardi
class LUDestroyCluster(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {"OP_TARGET": self.cfg.GetClusterName()}
    return env, [], []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    @return: the name of the (former) master node, so the caller can
        finish the teardown of that node

    """
    master = self.cfg.GetMasterNode()
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Run post hooks on master node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
    except Exception:
      # hook failures must not abort cluster destruction, but a bare
      # "except:" would also swallow SystemExit/KeyboardInterrupt
      self.LogWarning("Errors occurred running hooks on %s" % master)

    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    if modify_ssh_setup:
      # keep a copy of the keys we are about to invalidate
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      utils.CreateBackup(priv_key)
      utils.CreateBackup(pub_key)

    return master
889 a8083063 Iustin Pop
890 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
891 a8083063 Iustin Pop
  """Verifies the cluster status.
892 a8083063 Iustin Pop

893 a8083063 Iustin Pop
  """
894 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
895 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
896 a0c9776a Iustin Pop
  _OP_REQP = ["skip_checks", "verbose", "error_codes", "debug_simulate_errors"]
897 d4b9d97f Guido Trotter
  REQ_BGL = False
898 d4b9d97f Guido Trotter
899 7c874ee1 Iustin Pop
  TCLUSTER = "cluster"
900 7c874ee1 Iustin Pop
  TNODE = "node"
901 7c874ee1 Iustin Pop
  TINSTANCE = "instance"
902 7c874ee1 Iustin Pop
903 7c874ee1 Iustin Pop
  ECLUSTERCFG = (TCLUSTER, "ECLUSTERCFG")
904 7c874ee1 Iustin Pop
  EINSTANCEBADNODE = (TINSTANCE, "EINSTANCEBADNODE")
905 7c874ee1 Iustin Pop
  EINSTANCEDOWN = (TINSTANCE, "EINSTANCEDOWN")
906 7c874ee1 Iustin Pop
  EINSTANCELAYOUT = (TINSTANCE, "EINSTANCELAYOUT")
907 7c874ee1 Iustin Pop
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
908 7c874ee1 Iustin Pop
  EINSTANCEMISSINGDISK = (TINSTANCE, "EINSTANCEMISSINGDISK")
909 7c874ee1 Iustin Pop
  EINSTANCEWRONGNODE = (TINSTANCE, "EINSTANCEWRONGNODE")
910 7c874ee1 Iustin Pop
  ENODEDRBD = (TNODE, "ENODEDRBD")
911 7c874ee1 Iustin Pop
  ENODEFILECHECK = (TNODE, "ENODEFILECHECK")
912 7c874ee1 Iustin Pop
  ENODEHOOKS = (TNODE, "ENODEHOOKS")
913 7c874ee1 Iustin Pop
  ENODEHV = (TNODE, "ENODEHV")
914 7c874ee1 Iustin Pop
  ENODELVM = (TNODE, "ENODELVM")
915 7c874ee1 Iustin Pop
  ENODEN1 = (TNODE, "ENODEN1")
916 7c874ee1 Iustin Pop
  ENODENET = (TNODE, "ENODENET")
917 7c874ee1 Iustin Pop
  ENODEORPHANINSTANCE = (TNODE, "ENODEORPHANINSTANCE")
918 7c874ee1 Iustin Pop
  ENODEORPHANLV = (TNODE, "ENODEORPHANLV")
919 7c874ee1 Iustin Pop
  ENODERPC = (TNODE, "ENODERPC")
920 7c874ee1 Iustin Pop
  ENODESSH = (TNODE, "ENODESSH")
921 7c874ee1 Iustin Pop
  ENODEVERSION = (TNODE, "ENODEVERSION")
922 7c0aa8e9 Iustin Pop
  ENODESETUP = (TNODE, "ENODESETUP")
923 7c874ee1 Iustin Pop
924 a0c9776a Iustin Pop
  ETYPE_FIELD = "code"
925 a0c9776a Iustin Pop
  ETYPE_ERROR = "ERROR"
926 a0c9776a Iustin Pop
  ETYPE_WARNING = "WARNING"
927 a0c9776a Iustin Pop
928 d4b9d97f Guido Trotter
  def ExpandNames(self):
929 d4b9d97f Guido Trotter
    self.needed_locks = {
930 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
931 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
932 d4b9d97f Guido Trotter
    }
933 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
934 a8083063 Iustin Pop
935 7c874ee1 Iustin Pop
  def _Error(self, ecode, item, msg, *args, **kwargs):
936 7c874ee1 Iustin Pop
    """Format an error message.
937 7c874ee1 Iustin Pop

938 7c874ee1 Iustin Pop
    Based on the opcode's error_codes parameter, either format a
939 7c874ee1 Iustin Pop
    parseable error code, or a simpler error string.
940 7c874ee1 Iustin Pop

941 7c874ee1 Iustin Pop
    This must be called only from Exec and functions called from Exec.
942 7c874ee1 Iustin Pop

943 7c874ee1 Iustin Pop
    """
944 a0c9776a Iustin Pop
    ltype = kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR)
945 7c874ee1 Iustin Pop
    itype, etxt = ecode
946 7c874ee1 Iustin Pop
    # first complete the msg
947 7c874ee1 Iustin Pop
    if args:
948 7c874ee1 Iustin Pop
      msg = msg % args
949 7c874ee1 Iustin Pop
    # then format the whole message
950 7c874ee1 Iustin Pop
    if self.op.error_codes:
951 7c874ee1 Iustin Pop
      msg = "%s:%s:%s:%s:%s" % (ltype, etxt, itype, item, msg)
952 7c874ee1 Iustin Pop
    else:
953 7c874ee1 Iustin Pop
      if item:
954 7c874ee1 Iustin Pop
        item = " " + item
955 7c874ee1 Iustin Pop
      else:
956 7c874ee1 Iustin Pop
        item = ""
957 7c874ee1 Iustin Pop
      msg = "%s: %s%s: %s" % (ltype, itype, item, msg)
958 7c874ee1 Iustin Pop
    # and finally report it via the feedback_fn
959 7c874ee1 Iustin Pop
    self._feedback_fn("  - %s" % msg)
960 7c874ee1 Iustin Pop
961 a0c9776a Iustin Pop
  def _ErrorIf(self, cond, *args, **kwargs):
962 a0c9776a Iustin Pop
    """Log an error message if the passed condition is True.
963 a0c9776a Iustin Pop

964 a0c9776a Iustin Pop
    """
965 a0c9776a Iustin Pop
    cond = bool(cond) or self.op.debug_simulate_errors
966 a0c9776a Iustin Pop
    if cond:
967 a0c9776a Iustin Pop
      self._Error(*args, **kwargs)
968 a0c9776a Iustin Pop
    # do not mark the operation as failed for WARN cases only
969 a0c9776a Iustin Pop
    if kwargs.get(self.ETYPE_FIELD, self.ETYPE_ERROR) == self.ETYPE_ERROR:
970 a0c9776a Iustin Pop
      self.bad = self.bad or cond
971 a0c9776a Iustin Pop
972 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
973 7c874ee1 Iustin Pop
                  node_result, master_files, drbd_map, vg_name):
974 a8083063 Iustin Pop
    """Run multiple tests against a node.
975 a8083063 Iustin Pop

976 112f18a5 Iustin Pop
    Test list:
977 e4376078 Iustin Pop

978 a8083063 Iustin Pop
      - compares ganeti version
979 5bbd3f7f Michael Hanselmann
      - checks vg existence and size > 20G
980 a8083063 Iustin Pop
      - checks config file checksum
981 a8083063 Iustin Pop
      - checks ssh to other nodes
982 a8083063 Iustin Pop

983 112f18a5 Iustin Pop
    @type nodeinfo: L{objects.Node}
984 112f18a5 Iustin Pop
    @param nodeinfo: the node to check
985 e4376078 Iustin Pop
    @param file_list: required list of files
986 e4376078 Iustin Pop
    @param local_cksum: dictionary of local files and their checksums
987 e4376078 Iustin Pop
    @param node_result: the results from the node
988 112f18a5 Iustin Pop
    @param master_files: list of files that only masters should have
989 6d2e83d5 Iustin Pop
    @param drbd_map: the useddrbd minors for this node, in
990 6d2e83d5 Iustin Pop
        form of minor: (instance, must_exist) which correspond to instances
991 6d2e83d5 Iustin Pop
        and their running status
992 cc9e1230 Guido Trotter
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
993 098c0958 Michael Hanselmann

994 a8083063 Iustin Pop
    """
995 112f18a5 Iustin Pop
    node = nodeinfo.name
996 a0c9776a Iustin Pop
    _ErrorIf = self._ErrorIf
997 25361b9a Iustin Pop
998 25361b9a Iustin Pop
    # main result, node_result should be a non-empty dict
999 a0c9776a Iustin Pop
    test = not node_result or not isinstance(node_result, dict)
1000 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1001 7c874ee1 Iustin Pop
                  "unable to verify node: no data returned")
1002 a0c9776a Iustin Pop
    if test:
1003 a0c9776a Iustin Pop
      return
1004 25361b9a Iustin Pop
1005 a8083063 Iustin Pop
    # compares ganeti version
1006 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
1007 25361b9a Iustin Pop
    remote_version = node_result.get('version', None)
1008 a0c9776a Iustin Pop
    test = not (remote_version and
1009 a0c9776a Iustin Pop
                isinstance(remote_version, (list, tuple)) and
1010 a0c9776a Iustin Pop
                len(remote_version) == 2)
1011 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODERPC, node,
1012 a0c9776a Iustin Pop
             "connection to node returned invalid data")
1013 a0c9776a Iustin Pop
    if test:
1014 a0c9776a Iustin Pop
      return
1015 a0c9776a Iustin Pop
1016 a0c9776a Iustin Pop
    test = local_version != remote_version[0]
1017 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEVERSION, node,
1018 a0c9776a Iustin Pop
             "incompatible protocol versions: master %s,"
1019 a0c9776a Iustin Pop
             " node %s", local_version, remote_version[0])
1020 a0c9776a Iustin Pop
    if test:
1021 a0c9776a Iustin Pop
      return
1022 a8083063 Iustin Pop
1023 e9ce0a64 Iustin Pop
    # node seems compatible, we can actually try to look into its results
1024 a8083063 Iustin Pop
1025 e9ce0a64 Iustin Pop
    # full package version
1026 a0c9776a Iustin Pop
    self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
1027 a0c9776a Iustin Pop
                  self.ENODEVERSION, node,
1028 7c874ee1 Iustin Pop
                  "software version mismatch: master %s, node %s",
1029 7c874ee1 Iustin Pop
                  constants.RELEASE_VERSION, remote_version[1],
1030 a0c9776a Iustin Pop
                  code=self.ETYPE_WARNING)
1031 e9ce0a64 Iustin Pop
1032 e9ce0a64 Iustin Pop
    # checks vg existence and size > 20G
1033 cc9e1230 Guido Trotter
    if vg_name is not None:
1034 cc9e1230 Guido Trotter
      vglist = node_result.get(constants.NV_VGLIST, None)
1035 a0c9776a Iustin Pop
      test = not vglist
1036 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODELVM, node, "unable to check volume groups")
1037 a0c9776a Iustin Pop
      if not test:
1038 cc9e1230 Guido Trotter
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
1039 cc9e1230 Guido Trotter
                                              constants.MIN_VG_SIZE)
1040 a0c9776a Iustin Pop
        _ErrorIf(vgstatus, self.ENODELVM, node, vgstatus)
1041 a8083063 Iustin Pop
1042 a8083063 Iustin Pop
    # checks config file checksum
1043 a8083063 Iustin Pop
1044 25361b9a Iustin Pop
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
1045 a0c9776a Iustin Pop
    test = not isinstance(remote_cksum, dict)
1046 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODEFILECHECK, node,
1047 a0c9776a Iustin Pop
             "node hasn't returned file checksum data")
1048 a0c9776a Iustin Pop
    if not test:
1049 a8083063 Iustin Pop
      for file_name in file_list:
1050 112f18a5 Iustin Pop
        node_is_mc = nodeinfo.master_candidate
1051 a0c9776a Iustin Pop
        must_have = (file_name not in master_files) or node_is_mc
1052 a0c9776a Iustin Pop
        # missing
1053 a0c9776a Iustin Pop
        test1 = file_name not in remote_cksum
1054 a0c9776a Iustin Pop
        # invalid checksum
1055 a0c9776a Iustin Pop
        test2 = not test1 and remote_cksum[file_name] != local_cksum[file_name]
1056 a0c9776a Iustin Pop
        # existing and good
1057 a0c9776a Iustin Pop
        test3 = not test1 and remote_cksum[file_name] == local_cksum[file_name]
1058 a0c9776a Iustin Pop
        _ErrorIf(test1 and must_have, self.ENODEFILECHECK, node,
1059 a0c9776a Iustin Pop
                 "file '%s' missing", file_name)
1060 a0c9776a Iustin Pop
        _ErrorIf(test2 and must_have, self.ENODEFILECHECK, node,
1061 a0c9776a Iustin Pop
                 "file '%s' has wrong checksum", file_name)
1062 a0c9776a Iustin Pop
        # not candidate and this is not a must-have file
1063 a0c9776a Iustin Pop
        _ErrorIf(test2 and not must_have, self.ENODEFILECHECK, node,
1064 a0c9776a Iustin Pop
                 "file '%s' should not exist on non master"
1065 a0c9776a Iustin Pop
                 " candidates (and the file is outdated)", file_name)
1066 a0c9776a Iustin Pop
        # all good, except non-master/non-must have combination
1067 a0c9776a Iustin Pop
        _ErrorIf(test3 and not must_have, self.ENODEFILECHECK, node,
1068 a0c9776a Iustin Pop
                 "file '%s' should not exist"
1069 a0c9776a Iustin Pop
                 " on non master candidates", file_name)
1070 a8083063 Iustin Pop
1071 25361b9a Iustin Pop
    # checks ssh to any
1072 25361b9a Iustin Pop
1073 a0c9776a Iustin Pop
    test = constants.NV_NODELIST not in node_result
1074 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODESSH, node,
1075 a0c9776a Iustin Pop
             "node hasn't returned node ssh connectivity data")
1076 a0c9776a Iustin Pop
    if not test:
1077 25361b9a Iustin Pop
      if node_result[constants.NV_NODELIST]:
1078 7c874ee1 Iustin Pop
        for a_node, a_msg in node_result[constants.NV_NODELIST].items():
1079 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODESSH, node,
1080 a0c9776a Iustin Pop
                   "ssh communication with node '%s': %s", a_node, a_msg)
1081 25361b9a Iustin Pop
1082 a0c9776a Iustin Pop
    test = constants.NV_NODENETTEST not in node_result
1083 a0c9776a Iustin Pop
    _ErrorIf(test, self.ENODENET, node,
1084 a0c9776a Iustin Pop
             "node hasn't returned node tcp connectivity data")
1085 a0c9776a Iustin Pop
    if not test:
1086 25361b9a Iustin Pop
      if node_result[constants.NV_NODENETTEST]:
1087 25361b9a Iustin Pop
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
1088 7c874ee1 Iustin Pop
        for anode in nlist:
1089 a0c9776a Iustin Pop
          _ErrorIf(True, self.ENODENET, node,
1090 a0c9776a Iustin Pop
                   "tcp communication with node '%s': %s",
1091 a0c9776a Iustin Pop
                   anode, node_result[constants.NV_NODENETTEST][anode])
1092 9d4bfc96 Iustin Pop
1093 25361b9a Iustin Pop
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
1094 e69d05fd Iustin Pop
    if isinstance(hyp_result, dict):
1095 e69d05fd Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
1096 a0c9776a Iustin Pop
        test = hv_result is not None
1097 a0c9776a Iustin Pop
        _ErrorIf(test, self.ENODEHV, node,
1098 a0c9776a Iustin Pop
                 "hypervisor %s verify failure: '%s'", hv_name, hv_result)
1099 6d2e83d5 Iustin Pop
1100 6d2e83d5 Iustin Pop
    # check used drbd list
1101 cc9e1230 Guido Trotter
    if vg_name is not None:
1102 cc9e1230 Guido Trotter
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
1103 a0c9776a Iustin Pop
      test = not isinstance(used_minors, (tuple, list))
1104 a0c9776a Iustin Pop
      _ErrorIf(test, self.ENODEDRBD, node,
1105 a0c9776a Iustin Pop
               "cannot parse drbd status file: %s", str(used_minors))
1106 a0c9776a Iustin Pop
      if not test:
1107 cc9e1230 Guido Trotter
        for minor, (iname, must_exist) in drbd_map.items():
1108 a0c9776a Iustin Pop
          test = minor not in used_minors and must_exist
1109 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODEDRBD, node,
1110 a0c9776a Iustin Pop
                   "drbd minor %d of instance %s is not active",
1111 a0c9776a Iustin Pop
                   minor, iname)
1112 cc9e1230 Guido Trotter
        for minor in used_minors:
1113 a0c9776a Iustin Pop
          test = minor not in drbd_map
1114 a0c9776a Iustin Pop
          _ErrorIf(test, self.ENODEDRBD, node,
1115 a0c9776a Iustin Pop
                   "unallocated drbd minor %d is in use", minor)
1116 7c0aa8e9 Iustin Pop
    test = node_result.get(constants.NV_NODESETUP,
1117 7c0aa8e9 Iustin Pop
                           ["Missing NODESETUP results"])
1118 7c0aa8e9 Iustin Pop
    _ErrorIf(test, self.ENODESETUP, node, "node setup error: %s",
1119 7c0aa8e9 Iustin Pop
             "; ".join(test))
1120 a8083063 Iustin Pop
1121 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, n_offline):
    """Verify a single instance.

    Checks that every logical volume the instance needs is present on the
    node that should hold it, and that the instance is running on (and only
    on) its primary node.

    """
    pnode = instanceconfig.primary_node

    # map of node name -> volumes the instance expects on that node
    vols_expected = {}
    instanceconfig.MapLVsByNode(vols_expected)

    for vnode, volumes in vols_expected.items():
      if vnode in n_offline:
        # offline nodes cannot report their volumes, so don't flag them
        continue
      present = node_vol_is.get(vnode, {})
      for lv in volumes:
        self._ErrorIf(lv not in present, self.EINSTANCEMISSINGDISK, instance,
                      "volume %s missing on node %s", lv, vnode)

    if instanceconfig.admin_up:
      # an admin-up instance must actually be running on its primary node
      # (unless that node is offline, in which case we can't tell)
      running_here = (pnode in node_instance and
                      instance in node_instance[pnode])
      self._ErrorIf(not running_here and pnode not in n_offline,
                    self.EINSTANCEDOWN, instance,
                    "instance not running on its primary node %s",
                    pnode)

    # the instance must not show up as running on any other node
    for onode in node_instance:
      if onode == pnode:
        continue
      self._ErrorIf(instance in node_instance[onode],
                    self.EINSTANCEWRONGNODE, instance,
                    "instance should not run on node %s", onode)
1157 a8083063 Iustin Pop
1158 7c874ee1 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is):
    """Check for volumes present on nodes but unknown to the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    for node, volumes in node_vol_is.items():
      # volumes the configuration expects on this node (none if unknown)
      expected = node_vol_should.get(node, {})
      for volume in volumes:
        self._ErrorIf(volume not in expected, self.ENODEORPHANLV, node,
                      "volume %s is unknown", volume)
1171 a8083063 Iustin Pop
1172 7c874ee1 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance):
    """Report instances that are running but unknown to the cluster.

    """
    known = frozenset(instancelist)
    for node, running in node_instance.items():
      for inst in running:
        self._ErrorIf(inst not in known, self.ENODEORPHANINSTANCE, node,
                      "instance %s on node %s should not exist", inst, node)
1183 a8083063 Iustin Pop
1184 7c874ee1 Iustin Pop
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    @param node_info: dict of node name to a dict with (at least) the keys
        'mfree' (free memory) and 'sinst-by-pnode' (secondary instances
        grouped by their primary node)
    @param instance_cfg: dict of instance name to instance config object

    """
    # the cluster info is loop-invariant; fetch it once instead of once
    # per instance
    cluster_info = self.cfg.GetClusterInfo()
    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = cluster_info.FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        test = nodeinfo['mfree'] < needed_mem
        # FIX: the original message read "not enough memory on to
        # accommodate failovers should peer node %s fail" (garbled)
        self._ErrorIf(test, self.ENODEN1, node,
                      "not enough memory to accommodate instance failovers"
                      " should node %s fail", prinode)
1210 2b3b6ddd Guido Trotter
1211 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Turn the requested list of checks to skip into a frozenset and make sure
    every entry is one of the optional, skippable checks.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    unknown = self.skip_set - constants.VERIFY_OPTIONAL_CHECKS
    if unknown:
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
1221 a8083063 Iustin Pop
1222 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just ran in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    run_nodes = self.cfg.GetNodeList()
    env = {}
    env["CLUSTER_TAGS"] = " ".join(self.cfg.GetClusterInfo().GetTags())
    for node_obj in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node_obj.name] = " ".join(node_obj.GetTags())
    # (environment, nodes for the pre phase, nodes for the post phase)
    return env, [], run_nodes
1237 d8fff41c Guido Trotter
1238 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    @param feedback_fn: callable used to report progress and findings
    @return: True if no problem was detected, False otherwise

    """
    self.bad = False
    _ErrorIf = self._ErrorIf
    verbose = self.op.verbose
    self._feedback_fn = feedback_fn
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      _ErrorIf(True, self.ECLUSTERCFG, None, msg)

    # gather the static cluster data needed by the per-node and
    # per-instance checks below
    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = [] # List of offline nodes
    n_drained = [] # List of nodes being drained
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.RAPI_CERT_FILE)
    file_names.extend(master_files)

    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      constants.NV_NODESETUP: None,
      }
    # LVM/DRBD checks only make sense when the cluster uses a volume group
    if vg_name is not None:
      node_verify_param[constants.NV_VGLIST] = None
      node_verify_param[constants.NV_LVLIST] = vg_name
      node_verify_param[constants.NV_DRBDLIST] = None
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())

    cluster = self.cfg.GetClusterInfo()
    master_node = self.cfg.GetMasterNode()
    all_drbd_map = self.cfg.ComputeDRBDMap()

    feedback_fn("* Verifying node status")
    for node_i in nodeinfo:
      node = node_i.name

      if node_i.offline:
        if verbose:
          feedback_fn("* Skipping offline node %s" % (node,))
        n_offline.append(node)
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained.append(node)
      else:
        ntype = "regular"
      if verbose:
        feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      msg = all_nvinfo[node].fail_msg
      _ErrorIf(msg, self.ENODERPC, node, "while contacting node: %s", msg)
      if msg:
        continue

      nresult = all_nvinfo[node].payload
      node_drbd = {}
      for minor, instance in all_drbd_map[node].items():
        test = instance not in instanceinfo
        _ErrorIf(test, self.ECLUSTERCFG, None,
                 "ghost instance '%s' in temporary DRBD map", instance)
        # ghost instance should not be running, but otherwise we
        # don't give double warnings (both ghost instance and
        # unallocated minor in use)
        if test:
          node_drbd[minor] = (instance, False)
        else:
          instance = instanceinfo[instance]
          node_drbd[minor] = (instance.name, instance.admin_up)
      self._VerifyNode(node_i, file_names, local_checksums,
                       nresult, master_files, node_drbd, vg_name)

      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
      if vg_name is None:
        node_volume[node] = {}
      elif isinstance(lvdata, basestring):
        # a string result means the LV listing failed on the node
        _ErrorIf(True, self.ENODELVM, node, "LVM problem on node: %s",
                 utils.SafeEncode(lvdata))
        node_volume[node] = {}
      elif not isinstance(lvdata, dict):
        _ErrorIf(True, self.ENODELVM, node, "rpc call to node failed (lvlist)")
        continue
      else:
        node_volume[node] = lvdata

      # node_instance
      idata = nresult.get(constants.NV_INSTANCELIST, None)
      test = not isinstance(idata, list)
      _ErrorIf(test, self.ENODEHV, node,
               "rpc call to node failed (instancelist)")
      if test:
        continue

      node_instance[node] = idata

      # node_info
      # FIX: use a name distinct from the outer 'nodeinfo' list instead of
      # rebinding it inside the loop that iterates over it
      nhvinfo = nresult.get(constants.NV_HVINFO, None)
      test = not isinstance(nhvinfo, dict)
      _ErrorIf(test, self.ENODEHV, node, "rpc call to node failed (hvinfo)")
      if test:
        continue

      try:
        node_info[node] = {
          "mfree": int(nhvinfo['memory_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
        # FIXME: devise a free space model for file based instances as well
        if vg_name is not None:
          test = (constants.NV_VGLIST not in nresult or
                  vg_name not in nresult[constants.NV_VGLIST])
          _ErrorIf(test, self.ENODELVM, node,
                   "node didn't return data for the volume group '%s'"
                   " - it is either missing or broken", vg_name)
          if test:
            continue
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
      except (ValueError, KeyError):
        _ErrorIf(True, self.ENODERPC, node,
                 "node returned invalid nodeinfo, check lvm/hypervisor")
        continue

    node_vol_should = {}

    feedback_fn("* Verifying instance status")
    for instance in instancelist:
      if verbose:
        feedback_fn("* Verifying instance %s" % instance)
      inst_config = instanceinfo[instance]
      self._VerifyInstance(instance, inst_config, node_volume,
                           node_instance, n_offline)
      inst_nodes_offline = []

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      _ErrorIf(pnode not in node_info and pnode not in n_offline,
               self.ENODERPC, pnode, "instance %s, connection to"
               " primary node failed", instance)
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)

      if pnode in n_offline:
        inst_nodes_offline.append(pnode)

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      _ErrorIf(len(inst_config.secondary_nodes) > 1,
               self.EINSTANCELAYOUT, instance,
               "instance has multiple secondary nodes", code="WARNING")

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        # FIX: added the missing space before "failed"; the adjacent
        # literals used to concatenate to "secondary nodefailed"
        _ErrorIf(snode not in node_info and snode not in n_offline,
                 self.ENODERPC, snode,
                 "instance %s, connection to secondary node"
                 " failed", instance)

        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)

        if snode in n_offline:
          inst_nodes_offline.append(snode)

      # warn that the instance lives on offline nodes
      _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
               "instance lives on offline node(s) %s",
               ", ".join(inst_nodes_offline))

    feedback_fn("* Verifying orphan volumes")
    self._VerifyOrphanVolumes(node_vol_should, node_volume)

    feedback_fn("* Verifying remaining instances")
    self._VerifyOrphanInstances(instancelist, node_instance)

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      self._VerifyNPlusOneMemory(node_info, instance_cfg)

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    if n_offline:
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))

    if n_drained:
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))

    return not self.bad
1489 a8083063 Iustin Pop
1490 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase != constants.HOOKS_PHASE_POST:
      # NOTE(review): historical behavior is to return None (not
      # lu_result) for any other phase; preserved here
      return None

    # Used to change hooks' output to proper indentation
    indent_re = re.compile('^', re.M)
    feedback_fn("* Hooks Results")
    assert hooks_results, "invalid result from hooks"

    # FIX: removed the unused local 'show_node_header'
    for node_name in hooks_results:
      res = hooks_results[node_name]
      msg = res.fail_msg
      test = msg and not res.offline
      self._ErrorIf(test, self.ENODEHOOKS, node_name,
                    "Communication failure in hooks execution: %s", msg)
      if test:
        # override manually lu_result here as _ErrorIf only
        # overrides self.bad
        lu_result = 1
        continue
      for script, hkr, output in res.payload:
        test = hkr == constants.HKR_FAIL
        self._ErrorIf(test, self.ENODEHOOKS, node_name,
                      "Script %s failed, output:", script)
        if test:
          output = indent_re.sub('      ', output)
          feedback_fn("%s" % output)
          lu_result = 1

    return lu_result
1535 d8fff41c Guido Trotter
1536 a8083063 Iustin Pop
1537 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # read-only LU: take shared locks on all nodes and all instances
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes

    """
    node_errors = {}
    bad_instances = []
    missing_lvs = {}
    result = (node_errors, bad_instances, missing_lvs)

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]

    # build a (node, volume) -> instance map, restricted to running,
    # network-mirrored instances (the only ones whose LVs we can verify)
    nv_dict = {}
    for inst in instances:
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst_lvs = {}
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_lv_list(nodes, vg_name)

    for node in nodes:
      node_res = node_lvs[node]
      if node_res.offline:
        # offline nodes are skipped silently
        continue
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        node_errors[node] = msg
        continue

      # payload maps lv_name -> (size, inactive, online)
      for lv_name, (_, _, lv_online) in node_res.payload.items():
        inst = nv_dict.pop((node, lv_name), None)
        if (inst is not None and not lv_online
            and inst.name not in bad_instances):
          bad_instances.append(inst.name)

    # whatever remains in nv_dict was never reported back by any node,
    # i.e. those LVs are missing; group them per instance name
    for key, inst in nv_dict.iteritems():
      missing_lvs.setdefault(inst.name, []).append(key)

    return result
1618 2c95a8d4 Iustin Pop
1619 2c95a8d4 Iustin Pop
1620 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  Queries each (primary) node for the real size of the instance disks
  and updates the configuration where the recorded size differs.

  """
  _OP_REQP = ["instances"]
  REQ_BGL = False

  def ExpandNames(self):
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      # explicit instance list: lock just those instances and (later,
      # via DeclareLocks) their primary nodes
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    else:
      # no instances given: operate on everything
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      # only the primary nodes hold the disks we need to query
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object
    @rtype: boolean
    @return: True if any child size was corrected, False otherwise

    """
    if disk.dev_type == constants.LD_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    @rtype: list
    @return: list of (instance name, disk index, new size) tuples for
        every disk whose recorded size was corrected

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(instance.disks):
        per_node_disks[pnode].append((instance, idx, disk))

    changed = []
    for node, dskl in per_node_disks.items():
      # work on copies so SetDiskID doesn't modify the config objects
      newl = [v[2].Copy() for v in dskl]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsizes(node, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getsizes call to node"
                        " %s, ignoring", node)
        continue
      # the decoded remote return value lives in .payload (cf. the
      # fail_msg check above and all other RPC consumers in this file);
      # .data is not the per-call result
      if len(result.payload) != len(dskl):
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(dskl, result.payload):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
        size = size >> 20  # bytes to mebibytes
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance, feedback_fn)
          changed.append((instance.name, idx, disk.size))
    return changed
1739 60975797 Iustin Pop
1740 60975797 Iustin Pop
1741 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  The rename stops the master role before changing the cluster name/IP
  in the configuration, and restarts it afterwards (see Exec).

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    Hooks run only on the master node, with the old and new names
    exposed in the environment.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name, requires that either the name or the IP
    actually changes, and (if the IP changes) that the new IP is not
    already in use on the network.

    """
    # resolves the name; raises if it cannot be resolved
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    # save the resolved IP for Exec
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # refuse an IP that already answers on the noded port
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    # store the canonical (resolved) name back into the opcode
    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    The master role is stopped for the duration of the change and
    restarted in the finally clause, even if the update fails.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      # persist the new name/IP in the cluster configuration
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        # the master already has the fresh file, don't push it there
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      # per-node upload failures are only warnings, not fatal
      for to_node, to_result in result.iteritems():
        msg = to_result.fail_msg
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
          self.proc.LogWarning(msg)

    finally:
      # always try to bring the master role back up
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)
1821 07bd8a51 Iustin Pop
1822 07bd8a51 Iustin Pop
1823 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Tell whether a disk, or any disk below it, is LVM-based.

  @type disk: L{objects.Disk}
  @param disk: the root of the disk tree to inspect
  @rtype: boolean
  @return: whether an LD_LV dev_type was found anywhere in the tree

  """
  # depth-first search over the children, if any
  for chdisk in (disk.children or []):
    if _RecursiveCheckIfLVMBased(chdisk):
      return True
  # no LVM-based descendant found; the answer is this disk itself
  return disk.dev_type == constants.LD_LV
1837 8084f9f6 Manuel Franceschini
1838 8084f9f6 Manuel Franceschini
1839 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1840 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1841 8084f9f6 Manuel Franceschini

1842 8084f9f6 Manuel Franceschini
  """
1843 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1844 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1845 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1846 c53279cf Guido Trotter
  REQ_BGL = False
1847 c53279cf Guido Trotter
1848 3994f455 Iustin Pop
  def CheckArguments(self):
1849 4b7735f9 Iustin Pop
    """Check parameters
1850 4b7735f9 Iustin Pop

1851 4b7735f9 Iustin Pop
    """
1852 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
1853 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
1854 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1855 4b7735f9 Iustin Pop
      try:
1856 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1857 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
1858 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1859 4b7735f9 Iustin Pop
                                   str(err))
1860 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
1861 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed")
1862 4b7735f9 Iustin Pop
1863 c53279cf Guido Trotter
  def ExpandNames(self):
1864 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1865 c53279cf Guido Trotter
    # all nodes to be modified.
1866 c53279cf Guido Trotter
    self.needed_locks = {
1867 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1868 c53279cf Guido Trotter
    }
1869 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1870 8084f9f6 Manuel Franceschini
1871 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1872 8084f9f6 Manuel Franceschini
    """Build hooks env.
1873 8084f9f6 Manuel Franceschini

1874 8084f9f6 Manuel Franceschini
    """
1875 8084f9f6 Manuel Franceschini
    env = {
1876 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1877 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1878 8084f9f6 Manuel Franceschini
      }
1879 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1880 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1881 8084f9f6 Manuel Franceschini
1882 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1883 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1884 8084f9f6 Manuel Franceschini

1885 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1886 5f83e263 Iustin Pop
    if the given volume group is valid.
1887 8084f9f6 Manuel Franceschini

1888 8084f9f6 Manuel Franceschini
    """
1889 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1890 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1891 8084f9f6 Manuel Franceschini
      for inst in instances:
1892 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1893 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1894 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1895 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1896 8084f9f6 Manuel Franceschini
1897 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1898 779c15bb Iustin Pop
1899 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1900 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1901 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
1902 8084f9f6 Manuel Franceschini
      for node in node_list:
1903 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
1904 e480923b Iustin Pop
        if msg:
1905 781de953 Iustin Pop
          # ignoring down node
1906 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
1907 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
1908 781de953 Iustin Pop
          continue
1909 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
1910 781de953 Iustin Pop
                                              self.op.vg_name,
1911 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1912 8084f9f6 Manuel Franceschini
        if vgstatus:
1913 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1914 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1915 8084f9f6 Manuel Franceschini
1916 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
1917 5af3da74 Guido Trotter
    # validate params changes
1918 779c15bb Iustin Pop
    if self.op.beparams:
1919 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1920 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
1921 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
1922 779c15bb Iustin Pop
1923 5af3da74 Guido Trotter
    if self.op.nicparams:
1924 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
1925 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
1926 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
1927 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
1928 5af3da74 Guido Trotter
1929 779c15bb Iustin Pop
    # hypervisor list/parameters
1930 abe609b2 Guido Trotter
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
1931 779c15bb Iustin Pop
    if self.op.hvparams:
1932 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
1933 779c15bb Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1934 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
1935 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
1936 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
1937 779c15bb Iustin Pop
        else:
1938 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
1939 779c15bb Iustin Pop
1940 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1941 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
1942 b119bccb Guido Trotter
      if not self.hv_list:
1943 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
1944 b119bccb Guido Trotter
                                   " least one member")
1945 b119bccb Guido Trotter
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
1946 b119bccb Guido Trotter
      if invalid_hvs:
1947 b119bccb Guido Trotter
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
1948 6915bc28 Guido Trotter
                                   " entries: %s" % " ,".join(invalid_hvs))
1949 779c15bb Iustin Pop
    else:
1950 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
1951 779c15bb Iustin Pop
1952 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1953 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
1954 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
1955 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1956 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
1957 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
1958 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
1959 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
1960 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1961 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
1962 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
1963 779c15bb Iustin Pop
1964 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1965 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1966 8084f9f6 Manuel Franceschini

1967 8084f9f6 Manuel Franceschini
    """
1968 779c15bb Iustin Pop
    if self.op.vg_name is not None:
1969 b2482333 Guido Trotter
      new_volume = self.op.vg_name
1970 b2482333 Guido Trotter
      if not new_volume:
1971 b2482333 Guido Trotter
        new_volume = None
1972 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
1973 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
1974 779c15bb Iustin Pop
      else:
1975 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
1976 779c15bb Iustin Pop
                    " state, not changing")
1977 779c15bb Iustin Pop
    if self.op.hvparams:
1978 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
1979 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1980 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1981 779c15bb Iustin Pop
    if self.op.beparams:
1982 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
1983 5af3da74 Guido Trotter
    if self.op.nicparams:
1984 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
1985 5af3da74 Guido Trotter
1986 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1987 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1988 75e914fb Iustin Pop
      # we need to update the pool size here, otherwise the save will fail
1989 44485f49 Guido Trotter
      _AdjustCandidatePool(self, [])
1990 4b7735f9 Iustin Pop
1991 a4eae71f Michael Hanselmann
    self.cfg.Update(self.cluster, feedback_fn)
1992 8084f9f6 Manuel Franceschini
1993 8084f9f6 Manuel Franceschini
1994 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to

  """
  # target nodes: every configured node plus the extra ones, minus the
  # master itself (which already has the files)
  master_info = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  target_nodes = lu.cfg.GetNodeList()
  if additional_nodes is not None:
    target_nodes.extend(additional_nodes)
  if master_info.name in target_nodes:
    target_nodes.remove(master_info.name)

  # files to push: the static cluster-wide files...
  files_to_copy = set([constants.ETC_HOSTS,
                       constants.SSH_KNOWN_HOSTS_FILE,
                       constants.RAPI_CERT_FILE,
                       constants.RAPI_USERS_FILE,
                       constants.HMAC_CLUSTER_KEY,
                      ])

  # ...plus whatever each enabled hypervisor declares as ancillary
  for hv_name in lu.cfg.GetClusterInfo().enabled_hypervisors:
    files_to_copy.update(hypervisor.GetHypervisor(hv_name).GetAncillaryFiles())

  # upload every file that actually exists on the master; per-node
  # failures are reported as warnings rather than aborting
  for fname in files_to_copy:
    if not os.path.exists(fname):
      continue
    upload_result = lu.rpc.call_upload_file(target_nodes, fname)
    for to_node, to_result in upload_result.items():
      msg = to_result.fail_msg
      if msg:
        lu.proc.LogWarning("Copy of file %s to node %s failed: %s" %
                           (fname, to_node, msg))
2036 28eddce5 Guido Trotter
2037 28eddce5 Guido Trotter
2038 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # we touch every node, but a shared lock on each is enough
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }

  def CheckPrereq(self):
    """Check prerequisites.

    There are none.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    # a no-op config write forces a push of the config/ssconf files...
    cluster_info = self.cfg.GetClusterInfo()
    self.cfg.Update(cluster_info, feedback_fn)
    # ...and this takes care of the remaining ancillary files
    _RedistributeAncillaryFiles(self)
2064 afee0879 Iustin Pop
2065 afee0879 Iustin Pop
2066 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  @type lu: L{LogicalUnit}
  @param lu: the LU on whose behalf we operate; used for RPC calls,
      config access and user feedback
  @param instance: the instance whose disks are polled (mirror status
      is always queried on the primary node)
  @type oneshot: boolean
  @param oneshot: whether to poll only once instead of looping until
      the sync completes
  @param unlock: unused in this function; presumably kept for
      interface compatibility with callers -- TODO confirm
  @rtype: boolean
  @return: True if no disk ended up degraded, False otherwise

  """
  if not instance.disks:
    # no disks, nothing to wait for
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  # consecutive RPC failures seen so far; reset on the first success
  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    msg = rstats.fail_msg
    if msg:
      # RPC-level failure: warn and retry a bounded number of times
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, instance.disks[i].iv_name)
        continue

      # a disk counts as "cumulatively degraded" when it reports
      # degraded status but no sync is in progress (sync_percent None)
      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = "%d estimated seconds remaining" % mstat.estimated_time
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, mstat.sync_percent,
                         rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    # sleep at most the longest estimated remaining sync time, capped
    # at one minute between polls
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
2135 a8083063 Iustin Pop
2136 a8083063 Iustin Pop
2137 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  @type lu: L{LogicalUnit}
  @param lu: the LU on whose behalf the check runs (RPC and config
      access)
  @param dev: the disk object to check; children are checked
      recursively
  @param node: the node on which to look up the device
  @type on_primary: boolean
  @param on_primary: whether the device is (to be) assembled on the
      primary node; if False, the device is only queried when it would
      also be assembled on a secondary
  @type ldisk: boolean
  @param ldisk: switch the check to local-disk status (see above)
  @rtype: boolean
  @return: True if the device and all its children look consistent

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

  if dev.children:
    for child in dev.children:
      # NOTE(review): ldisk is not forwarded to the recursive call, so
      # children are always checked via is_degraded -- confirm intended
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result
2169 a8083063 Iustin Pop
2170 a8083063 Iustin Pop
2171 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  # no purely-static fields: everything comes from the per-node RPC
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status", "variants")
  # Fields that need calculation of global os validity
  _FIELDS_NEEDVALID = frozenset(["valid", "variants"])

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and tuples of (path, status, diagnose) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "")]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        # skip nodes whose RPC failed or which returned no OS data
        continue
      for name, path, status, diagnose, variants in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        all_os[name][node_name].append((path, status, diagnose, variants))
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    # only compute cluster-wide validity/variants if a requested field
    # actually needs them
    calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
    calc_variants = "variants" in self.op.output_fields

    for os_name, os_data in pol.items():
      row = []
      if calc_valid:
        valid = True
        variants = None
        for osl in os_data.values():
          # an OS is globally valid only if its first entry is valid on
          # every node
          valid = valid and osl and osl[0][1]
          if not valid:
            variants = None
            break
          if calc_variants:
            node_variants = osl[0][3]
            if variants is None:
              variants = node_variants
            else:
              # keep only variants supported on all nodes seen so far
              variants = [v for v in variants if v in node_variants]

      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = valid
        elif field == "node_status":
          # this is just a copy of the dict
          val = {}
          for node_name, nos_list in os_data.items():
            val[node_name] = nos_list
        elif field == "variants":
          val =  variants
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
2284 a8083063 Iustin Pop
2285 a8083063 Iustin Pop
2286 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # the node being removed must not run its own removal hooks
    if self.op.node_name in all_nodes:
      all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # call-form raise for consistency with the rest of the module;
      # the old "raise E, args" statement form is deprecated
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self, exceptions=[node.name])
    self.context.RemoveNode(node.name)

    # Run post hooks on the node before it's removed
    hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
    try:
      # best-effort: a hook failure must not abort the node removal
      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
    except Exception:
      # narrowed from a bare "except:" so that SystemExit and
      # KeyboardInterrupt still propagate
      self.LogWarning("Errors occurred running hooks on %s" % node.name)

    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)
2366 c8a0948f Michael Hanselmann
2367 a8083063 Iustin Pop
2368 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False

  # fields that map 1:1 to node-object attributes (read via getattr)
  _SIMPLE_FIELDS = ["name", "serial_no", "ctime", "mtime", "uuid",
                    "master_candidate", "offline", "drained"]

  # fields that require a live node_info RPC to compute
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  _FIELDS_STATIC = utils.FieldSet(*[
    "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "master",
    "role"] + _SIMPLE_FIELDS
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed when dynamic data (node_info RPC) is
    # requested and the caller asked for it
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      # without locks, nodes may have vanished since ExpandNames ran
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.fail_msg and nodeinfo.payload:
          nodeinfo = nodeinfo.payload
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          # RPC failed: all dynamic fields for this node become None
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    # node name -> set of instance names (primary/secondary on it)
    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field in self._SIMPLE_FIELDS:
          val = getattr(node, field)
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "master":
          val = node.name == master_node
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        elif field == "role":
          # single-letter role: Master, Candidate, Drained, Offline,
          # or Regular
          if node.name == master_node:
            val = "M"
          elif node.master_candidate:
            val = "C"
          elif node.drained:
            val = "D"
          elif node.offline:
            val = "O"
          else:
            val = "R"
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
2527 a8083063 Iustin Pop
2528 a8083063 Iustin Pop
2529 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if not self.op.nodes:
      # no explicit node list: query (and lock) all nodes
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # remember the node set resolved under lock for Exec
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    # instance -> {node: [lv names]} map, used for the "instance" field
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes[node]
      if nresult.offline:
        # offline nodes are silently skipped
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      # copy before sorting so the RPC payload is left untouched
      node_vols = nresult.payload[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV; for/else yields '-'
            # when no instance matches
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
2613 dcb93971 Michael Hanselmann
2614 dcb93971 Michael Hanselmann
2615 9e5442ce Michael Hanselmann
class LUQueryNodeStorage(NoHooksLU):
  """Logical unit for getting information on storage units on node(s).

  """
  _OP_REQP = ["nodes", "storage_type", "output_fields"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)

  def ExpandNames(self):
    if self.op.storage_type not in constants.VALID_STORAGE_TYPES:
      raise errors.OpPrereqError("Unknown storage type: %s" %
                                 self.op.storage_type)

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=utils.FieldSet(*constants.VALID_STORAGE_FIELDS),
                       selected=self.op.output_fields)

    # Node locks are shared: this is a read-only query
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.nodes:
      wanted = _GetWantedNodes(self, self.op.nodes)
    else:
      wanted = locking.ALL_SET
    self.needed_locks[locking.LEVEL_NODE] = wanted

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # optional unit name filter; default to None when not given
    self.op.name = getattr(self.op, "name", None)

    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    # The name field is always queried, since rows are sorted by it
    if constants.SF_NAME not in self.op.output_fields:
      fields = [constants.SF_NAME] + self.op.output_fields
    else:
      fields = self.op.output_fields[:]

    # Node and type are known only to this LU; never query them remotely
    fields = [fname for fname in fields
              if fname not in (constants.SF_NODE, constants.SF_TYPE)]

    field_idx = dict((fname, idx) for (idx, fname) in enumerate(fields))
    name_idx = field_idx[constants.SF_NAME]

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    data = self.rpc.call_storage_list(self.nodes,
                                      self.op.storage_type, st_args,
                                      self.op.name, fields)

    result = []

    for node in utils.NiceSort(self.nodes):
      nresult = data[node]
      if nresult.offline:
        continue

      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't get storage data from node %s: %s", node, msg)
        continue

      # Index the returned rows by unit name for sorted emission
      rows = dict((row[name_idx], row) for row in nresult.payload)

      for name in utils.NiceSort(rows.keys()):
        row = rows[name]
        out = []
        for field in self.op.output_fields:
          if field == constants.SF_NODE:
            val = node
          elif field == constants.SF_TYPE:
            val = self.op.storage_type
          elif field in field_idx:
            val = row[field_idx[field]]
          else:
            raise errors.ParameterError(field)
          out.append(val)

        result.append(out)

    return result
2709 9e5442ce Michael Hanselmann
2710 9e5442ce Michael Hanselmann
2711 efb8da02 Michael Hanselmann
class LUModifyNodeStorage(NoHooksLU):
  """Logical unit for modifying a storage volume on a node.

  """
  _OP_REQP = ["node_name", "storage_type", "name", "changes"]
  REQ_BGL = False

  def CheckArguments(self):
    expanded_name = self.cfg.ExpandNodeName(self.op.node_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = expanded_name

    if self.op.storage_type not in constants.VALID_STORAGE_TYPES:
      raise errors.OpPrereqError("Unknown storage type: %s" %
                                 self.op.storage_type)

  def ExpandNames(self):
    # Only the target node needs to be locked
    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_name,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that only fields modifiable for this storage type are changed.

    """
    stype = self.op.storage_type

    try:
      modifiable = constants.MODIFIABLE_STORAGE_FIELDS[stype]
    except KeyError:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " modified" % stype)

    unsupported = set(self.op.changes.keys()) - modifiable
    if unsupported:
      raise errors.OpPrereqError("The following fields can not be modified for"
                                 " storage units of type '%s': %r" %
                                 (stype, list(unsupported)))

  def Exec(self, feedback_fn):
    """Applies the requested changes to the storage unit.

    """
    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_modify(self.op.node_name,
                                          self.op.storage_type, st_args,
                                          self.op.name, self.op.changes)
    result.Raise("Failed to modify storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))
2762 efb8da02 Michael Hanselmann
2763 efb8da02 Michael Hanselmann
2764 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current node list, post-hooks also on the new node
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; also normalizes it and determines the primary IP
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # single-homed node: secondary equals primary
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    # membership check: must not exist for add, must exist for readd
    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    # ensure the new node's addresses don't collide with any existing node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # on readd, the node must keep the exact same IP configuration
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # on readd, exclude the node itself from the candidate-pool computation
    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []

    self.master_candidate = _DecideSelfPromotion(self, exceptions=exceptions)

    if self.op.readd:
      # reuse the existing (locked) node object
      self.new_node = self.cfg.GetNodeInfo(node)
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
    else:
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # setup ssh on node
    if self.cfg.GetClusterInfo().modify_ssh_setup:
      logging.info("Copy ssh key to node %s", node)
      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
      keyarray = []
      # host keys plus the cluster's user key pair, in the order
      # expected by the call_node_add RPC below
      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                  priv_key, pub_key]

      for i in keyfiles:
        keyarray.append(utils.ReadFile(i))

      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                      keyarray[2], keyarray[3], keyarray[4],
                                      keyarray[5])
      result.Raise("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      utils.AddHostToEtcHosts(new_node.name)

    # dual-homed node: verify it actually owns the secondary IP we were given
    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
                   prereq=True)
      if not result.payload:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # run a node-verify pass from the master against the new node
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      constants.NV_NODELIST: [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload[constants.NV_NODELIST]
      if nl_payload:
        # nl_payload maps failed node name -> error message
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node, feedback_fn)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
      self.context.AddNode(new_node)
2974 a8083063 Iustin Pop
2975 a8083063 Iustin Pop
2976 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    # each flag is a tri-state: True, False or None (= leave unchanged)
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification")
    # offline/drained/master_candidate are mutually exclusive as targets
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via masterfailover")

    # Boolean value that tells us whether we're offlining or draining the node
    offline_or_drain = self.op.offline == True or self.op.drained == True
    deoffline_or_drain = self.op.offline == False or self.op.drained == False

    # demoting a master candidate must not shrink the pool below its target
    if (node.master_candidate and
        (self.op.master_candidate == False or offline_or_drain)):
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      mc_now, mc_should, mc_max = self.cfg.GetMasterCandidateStats()
      if mc_now <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, mc_now-1))
        # Only allow forcing the operation if it's an offline/drain operation,
        # and we could not possibly promote more nodes.
        # FIXME: this can still lead to issues if in any way another node which
        # could be promoted appears in the meantime.
        if self.op.force and offline_or_drain and mc_should == mc_max:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    # an offline/drained node can't be promoted unless the same operation
    # also clears that state
    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name)

    # If we're being deofflined/drained, we'll MC ourself if needed
    if (deoffline_or_drain and not offline_or_drain and not
        self.op.master_candidate == True):
      self.op.master_candidate = _DecideSelfPromotion(self)
      if self.op.master_candidate:
        self.LogInfo("Autopromoting node to master candidate")

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    # list of (parameter, new value/reason) pairs, returned to the caller
    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        # going offline implies demotion and clearing of the drained flag
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        # ask the node to clean up after itself; failure is non-fatal
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        # draining implies demotion and clearing of the offline flag
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
          rrc = self.rpc.call_node_demote_from_mc(node.name)
          msg = rrc.fail_msg
          if msg:
            self.LogWarning("Node failed to demote itself: %s" % msg)
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node, feedback_fn)
    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result
3124 b31c8676 Iustin Pop
3125 b31c8676 Iustin Pop
3126 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
  """Powercycles a node.

  """
  _OP_REQP = ["node_name", "force"]
  REQ_BGL = False

  def CheckArguments(self):
    expanded_name = self.cfg.ExpandNodeName(self.op.node_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = expanded_name
    # powercycling the master is destructive enough to require --force
    if not self.op.force and expanded_name == self.cfg.GetMasterNode():
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set")

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resort option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This LU has no prereqs.

    """
    pass

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload
3167 f5118ade Iustin Pop
3168 f5118ade Iustin Pop
3169 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # read-only query: no locks needed
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    enabled = cluster.enabled_hypervisors
    return {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": enabled[0],
      "enabled_hypervisors": enabled,
      # only expose hvparams for the hypervisors that are enabled
      "hvparams": dict((hv_name, cluster.hvparams[hv_name])
                       for hv_name in enabled),
      "beparams": cluster.beparams,
      "nicparams": cluster.nicparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      }
3216 a8083063 Iustin Pop
3217 a8083063 Iustin Pop
3218 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag",
                                  "watcher_pause")

  def ExpandNames(self):
    # read-only query: no locks needed
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    @return: a list with one entry per requested output field, in the
        same order as C{self.op.output_fields}

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      elif field == "watcher_pause":
        # note: this used to 'return' directly, which aborted the loop,
        # dropped any fields requested after "watcher_pause" and broke
        # the one-value-per-field return contract
        entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values
3259 a8083063 Iustin Pop
3260 a8083063 Iustin Pop
3261 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance
    _CheckNodeOnline(self, instance.primary_node)
    # opcodes from older clients may lack the attribute; default it
    if not hasattr(self.op, "ignore_size"):
      self.op.ignore_size = False

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(
      self, self.instance, ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
3301 a8083063 Iustin Pop
3302 a8083063 Iustin Pop
3303 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: a tuple (disks_ok, device_info); disks_ok is False if the
      operation failed, and device_info is a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        # work on a copy so the in-memory config object keeps its size
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.fail_msg
      if msg:
        # primary-node failures are always fatal for the result,
        # regardless of ignore_secondaries
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    # dev_path stays None if the primary-node assemble failed
    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
3385 a8083063 Iustin Pop
3386 a8083063 Iustin Pop
3387 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance, ignore_secondaries=force)
  if disks_ok:
    return
  # assembly failed: tear down whatever came up, hint at --force, and abort
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.proc.LogWarning("", hint="If the message above refers to a"
                       " secondary node,"
                       " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
3400 fe7b0351 Michael Hanselmann
3401 fe7b0351 Michael Hanselmann
3402 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    _SafeShutdownInstanceDisks(self, self.instance)
3434 a8083063 Iustin Pop
3435 a8083063 Iustin Pop
3436 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  primary = instance.primary_node
  running = lu.rpc.call_instance_list([primary], [instance.hypervisor])[primary]
  running.Raise("Can't contact node %s" % primary)

  # refuse to touch the disks while the hypervisor reports the
  # instance as running on its primary node
  if instance.name in running.payload:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
3452 a8083063 Iustin Pop
3453 a8083063 Iustin Pop
3454 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are only
  logged and do not affect the result; errors on any other node (and,
  when ignore_primary is false, on the primary node too) make the
  function return False.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we shut down
  @type ignore_primary: boolean
  @param ignore_primary: whether to disregard shutdown errors on the
      primary node
  @rtype: boolean
  @return: True if all block devices were shut down successfully
      (subject to the ignore_primary rule above), False otherwise

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        # a failure is fatal unless it is on the primary node and the
        # caller asked for primary-node errors to be ignored
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result
3475 a8083063 Iustin Pop
3476 a8083063 Iustin Pop
3477 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks that a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_info = lu.rpc.call_node_info([node], lu.cfg.GetVGName(),
                                    hypervisor_name)
  node_info[node].Raise("Can't get data from node %s" % node, prereq=True)
  free_mem = node_info[node].payload.get('memory_free', None)
  # an offline/broken node may report a non-integer value here
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))
3509 d4f16fd9 Iustin Pop
3510 d4f16fd9 Iustin Pop
3511 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, validates any
    one-off be/hv parameter overrides passed in the opcode, and (when
    the instance isn't already running) that the primary node has
    enough free memory.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra beparams (optional one-off overrides for this start only)
    self.beparams = getattr(self.op, "beparams", {})
    if self.beparams:
      if not isinstance(self.beparams, dict):
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                                   " dict" % (type(self.beparams), ))
      # fill the beparams dict
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
      self.op.beparams = self.beparams

    # extra hvparams (optional one-off overrides for this start only)
    self.hvparams = getattr(self.op, "hvparams", {})
    if self.hvparams:
      if not isinstance(self.hvparams, dict):
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
                                   " dict" % (type(self.hvparams), ))

      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
      # validate the merged view: cluster defaults, then the instance's
      # own hvparams, then the one-off overrides on top
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
                                    instance.hvparams)
      filled_hvp.update(self.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
      self.op.hvparams = self.hvparams

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    # only check free memory if the instance is not already running
    if not remote_info.payload: # not running already
      _CheckNodeFreeMemory(self, instance.primary_node,
                           "starting instance %s" % instance.name,
                           bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    # mark the instance as up in the config before actually starting
    # it, so the watcher will restart it if the start fails later
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance,
                                          self.hvparams, self.beparams)
    msg = result.fail_msg
    if msg:
      # starting failed: clean up the disks we just assembled
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)
3609 a8083063 Iustin Pop
3610 a8083063 Iustin Pop
3611 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    # shutdown_timeout is optional in the opcode; fall back to the
    # cluster-wide default
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    # validate reboot_type before grabbing any locks
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboots are handled entirely by the node daemon
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type,
                                             self.shutdown_timeout)
      result.Raise("Could not reboot instance")
    else:
      # full reboot: shutdown, deactivate and reactivate the disks,
      # then start the instance again
      result = self.rpc.call_instance_shutdown(node_current, instance,
                                               self.shutdown_timeout)
      result.Raise("Could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.fail_msg
      if msg:
        # starting failed: clean up the disks we just assembled
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)
3699 bf6929a2 Alexander Schreiber
3700 bf6929a2 Alexander Schreiber
3701 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    The shutdown timeout is optional on the opcode; fall back to the
    cluster-wide default if it was not supplied.

    """
    self.timeout = getattr(self.op, "timeout",
                           constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["TIMEOUT"] = self.timeout
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance
    _CheckNodeOnline(self, instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    # record the new state in the config before talking to the node; a
    # shutdown failure is only warned about, not treated as fatal
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.timeout)
    msg = result.fail_msg
    if msg:
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)

    _ShutdownInstanceDisks(self, instance)
3756 a8083063 Iustin Pop
3757 a8083063 Iustin Pop
3758 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the requested OS (if any) is valid for its primary node.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked administratively down, the instance must not
    # actually be live on its primary node
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    self.op.force_variant = getattr(self.op, "force_variant", False)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: the reinstall opcode carries no 'pnode' attribute, so the
        # previous "self.op.pnode" here raised AttributeError instead of
        # producing this error message; use the instance's primary node
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
                   (self.op.os_type, pnode.name), prereq=True)
      if not self.op.force_variant:
        _CheckOSVariant(result.payload, self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst, feedback_fn)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # third argument marks this as a reinstall on the node side
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(self, inst)
3843 fe7b0351 Michael Hanselmann
3844 fe7b0351 Michael Hanselmann
3845 bd315bfa Iustin Pop
class LURecreateInstanceDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disks"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    The 'disks' parameter must be a list of non-negative disk indices.

    """
    if not isinstance(self.op.disks, list):
      raise errors.OpPrereqError("Invalid disks parameter")
    for item in self.op.disks:
      if (not isinstance(item, int) or
          item < 0):
        raise errors.OpPrereqError("Invalid disk specification '%s'" %
                                   str(item))

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that all requested disk indices exist.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked administratively down, the instance must not
    # actually be live on its primary node
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    if not self.op.disks:
      # an empty list means "recreate all disks"
      self.op.disks = range(len(instance.disks))
    else:
      for idx in self.op.disks:
        if idx >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    # disk indices that were not explicitly requested are left untouched
    # (the previous append loop also bound an unused disk object)
    to_skip = [idx for idx in range(len(self.instance.disks))
               if idx not in self.op.disks]

    _CreateDisks(self, self.instance, to_skip=to_skip)
3926 bd315bfa Iustin Pop
3927 bd315bfa Iustin Pop
3928 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name is resolvable and not yet in use.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked administratively down, the instance must not
    # actually be live on its primary node
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    # FIX: the docstring above used to read "Reinstall the instance.",
    # a copy-paste leftover from LUReinstallInstance
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.fail_msg
      if msg:
        # a failed rename script is not fatal: the config rename is done
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
4029 decd5f45 Iustin Pop
4030 decd5f45 Iustin Pop
4031 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    The shutdown timeout is optional on the opcode; fall back to the
    cluster-wide default if it was not supplied.

    """
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    inst = self.instance
    logging.info("Shutting down instance %s on node %s",
                 inst.name, inst.primary_node)

    shutdown_res = self.rpc.call_instance_shutdown(inst.primary_node, inst,
                                                   self.shutdown_timeout)
    msg = shutdown_res.fail_msg
    # with ignore_failures, shutdown/disk errors are only warned about
    if msg and not self.op.ignore_failures:
      raise errors.OpExecError("Could not shutdown instance %s on"
                               " node %s: %s" %
                               (inst.name, inst.primary_node, msg))
    elif msg:
      feedback_fn("Warning: can't shutdown instance: %s" % msg)

    logging.info("Removing block devices for instance %s", inst.name)

    if not _RemoveDisks(self, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", inst.name)

    self.cfg.RemoveInstance(inst.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = inst.name
4108 a8083063 Iustin Pop
4109 a8083063 Iustin Pop
4110 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
4111 a8083063 Iustin Pop
  """Logical unit for querying instances.
4112 a8083063 Iustin Pop

4113 a8083063 Iustin Pop
  """
4114 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
4115 7eb9d8f7 Guido Trotter
  REQ_BGL = False
4116 19bed813 Iustin Pop
  _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor",
4117 19bed813 Iustin Pop
                    "serial_no", "ctime", "mtime", "uuid"]
4118 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
4119 5b460366 Iustin Pop
                                    "admin_state",
4120 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
4121 638c6349 Guido Trotter
                                    "nic_mode", "nic_link",
4122 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
4123 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
4124 8aec325c Iustin Pop
                                    r"(disk)\.(size)/([0-9]+)",
4125 8aec325c Iustin Pop
                                    r"(disk)\.(sizes)", "disk_usage",
4126 638c6349 Guido Trotter
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
4127 638c6349 Guido Trotter
                                    r"(nic)\.(bridge)/([0-9]+)",
4128 638c6349 Guido Trotter
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
4129 8aec325c Iustin Pop
                                    r"(disk|nic)\.(count)",
4130 19bed813 Iustin Pop
                                    "hvparams",
4131 19bed813 Iustin Pop
                                    ] + _SIMPLE_FIELDS +
4132 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
4133 a2d2e1a7 Iustin Pop
                                   for name in constants.HVS_PARAMETERS] +
4134 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
4135 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
4136 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
4137 31bf511f Iustin Pop
4138 a8083063 Iustin Pop
4139 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
4140 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
4141 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
4142 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
4143 a8083063 Iustin Pop
4144 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
4145 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
4146 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
4147 7eb9d8f7 Guido Trotter
4148 57a2fb91 Iustin Pop
    if self.op.names:
4149 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
4150 7eb9d8f7 Guido Trotter
    else:
4151 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
4152 7eb9d8f7 Guido Trotter
4153 ec79568d Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
4154 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
4155 57a2fb91 Iustin Pop
    if self.do_locking:
4156 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
4157 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
4158 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4159 7eb9d8f7 Guido Trotter
4160 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
4161 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
4162 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
4163 7eb9d8f7 Guido Trotter
4164 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
4165 7eb9d8f7 Guido Trotter
    """Check prerequisites.
4166 7eb9d8f7 Guido Trotter

4167 7eb9d8f7 Guido Trotter
    """
4168 57a2fb91 Iustin Pop
    pass
4169 069dcc86 Iustin Pop
4170 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4171 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
4172 a8083063 Iustin Pop

4173 a8083063 Iustin Pop
    """
4174 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
4175 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
4176 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
4177 a7f5dc98 Iustin Pop
      if self.do_locking:
4178 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
4179 a7f5dc98 Iustin Pop
      else:
4180 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
4181 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
4182 57a2fb91 Iustin Pop
    else:
4183 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
4184 a7f5dc98 Iustin Pop
      if self.do_locking:
4185 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
4186 a7f5dc98 Iustin Pop
      else:
4187 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
4188 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
4189 a7f5dc98 Iustin Pop
      if missing:
4190 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
4191 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
4192 a7f5dc98 Iustin Pop
      instance_names = self.wanted
4193 c1f1cbb2 Iustin Pop
4194 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
4195 a8083063 Iustin Pop
4196 a8083063 Iustin Pop
    # begin data gathering
4197 a8083063 Iustin Pop
4198 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
4199 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
4200 a8083063 Iustin Pop
4201 a8083063 Iustin Pop
    bad_nodes = []
4202 cbfc4681 Iustin Pop
    off_nodes = []
4203 ec79568d Iustin Pop
    if self.do_node_query:
4204 a8083063 Iustin Pop
      live_data = {}
4205 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
4206 a8083063 Iustin Pop
      for name in nodes:
4207 a8083063 Iustin Pop
        result = node_data[name]
4208 cbfc4681 Iustin Pop
        if result.offline:
4209 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
4210 cbfc4681 Iustin Pop
          off_nodes.append(name)
4211 3cebe102 Michael Hanselmann
        if result.fail_msg:
4212 a8083063 Iustin Pop
          bad_nodes.append(name)
4213 781de953 Iustin Pop
        else:
4214 2fa74ef4 Iustin Pop
          if result.payload:
4215 2fa74ef4 Iustin Pop
            live_data.update(result.payload)
4216 2fa74ef4 Iustin Pop
          # else no instance is alive
4217 a8083063 Iustin Pop
    else:
4218 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
4219 a8083063 Iustin Pop
4220 a8083063 Iustin Pop
    # end data gathering
4221 a8083063 Iustin Pop
4222 5018a335 Iustin Pop
    HVPREFIX = "hv/"
4223 338e51e8 Iustin Pop
    BEPREFIX = "be/"
4224 a8083063 Iustin Pop
    output = []
4225 638c6349 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
4226 a8083063 Iustin Pop
    for instance in instance_list:
4227 a8083063 Iustin Pop
      iout = []
4228 638c6349 Guido Trotter
      i_hv = cluster.FillHV(instance)
4229 638c6349 Guido Trotter
      i_be = cluster.FillBE(instance)
4230 638c6349 Guido Trotter
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4231 638c6349 Guido Trotter
                                 nic.nicparams) for nic in instance.nics]
4232 a8083063 Iustin Pop
      for field in self.op.output_fields:
4233 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
4234 19bed813 Iustin Pop
        if field in self._SIMPLE_FIELDS:
4235 19bed813 Iustin Pop
          val = getattr(instance, field)
4236 a8083063 Iustin Pop
        elif field == "pnode":
4237 a8083063 Iustin Pop
          val = instance.primary_node
4238 a8083063 Iustin Pop
        elif field == "snodes":
4239 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
4240 a8083063 Iustin Pop
        elif field == "admin_state":
4241 0d68c45d Iustin Pop
          val = instance.admin_up
4242 a8083063 Iustin Pop
        elif field == "oper_state":
4243 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4244 8a23d2d3 Iustin Pop
            val = None
4245 a8083063 Iustin Pop
          else:
4246 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
4247 d8052456 Iustin Pop
        elif field == "status":
4248 cbfc4681 Iustin Pop
          if instance.primary_node in off_nodes:
4249 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
4250 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
4251 d8052456 Iustin Pop
            val = "ERROR_nodedown"
4252 d8052456 Iustin Pop
          else:
4253 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
4254 d8052456 Iustin Pop
            if running:
4255 0d68c45d Iustin Pop
              if instance.admin_up:
4256 d8052456 Iustin Pop
                val = "running"
4257 d8052456 Iustin Pop
              else:
4258 d8052456 Iustin Pop
                val = "ERROR_up"
4259 d8052456 Iustin Pop
            else:
4260 0d68c45d Iustin Pop
              if instance.admin_up:
4261 d8052456 Iustin Pop
                val = "ERROR_down"
4262 d8052456 Iustin Pop
              else:
4263 d8052456 Iustin Pop
                val = "ADMIN_down"
4264 a8083063 Iustin Pop
        elif field == "oper_ram":
4265 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
4266 8a23d2d3 Iustin Pop
            val = None
4267 a8083063 Iustin Pop
          elif instance.name in live_data:
4268 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
4269 a8083063 Iustin Pop
          else:
4270 a8083063 Iustin Pop
            val = "-"
4271 c1ce76bb Iustin Pop
        elif field == "vcpus":
4272 c1ce76bb Iustin Pop
          val = i_be[constants.BE_VCPUS]
4273 a8083063 Iustin Pop
        elif field == "disk_template":
4274 a8083063 Iustin Pop
          val = instance.disk_template
4275 a8083063 Iustin Pop
        elif field == "ip":
4276 39a02558 Guido Trotter
          if instance.nics:
4277 39a02558 Guido Trotter
            val = instance.nics[0].ip
4278 39a02558 Guido Trotter
          else:
4279 39a02558 Guido Trotter
            val = None
4280 638c6349 Guido Trotter
        elif field == "nic_mode":
4281 638c6349 Guido Trotter
          if instance.nics:
4282 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_MODE]
4283 638c6349 Guido Trotter
          else:
4284 638c6349 Guido Trotter
            val = None
4285 638c6349 Guido Trotter
        elif field == "nic_link":
4286 39a02558 Guido Trotter
          if instance.nics:
4287 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4288 638c6349 Guido Trotter
          else:
4289 638c6349 Guido Trotter
            val = None
4290 638c6349 Guido Trotter
        elif field == "bridge":
4291 638c6349 Guido Trotter
          if (instance.nics and
4292 638c6349 Guido Trotter
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
4293 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
4294 39a02558 Guido Trotter
          else:
4295 39a02558 Guido Trotter
            val = None
4296 a8083063 Iustin Pop
        elif field == "mac":
4297 39a02558 Guido Trotter
          if instance.nics:
4298 39a02558 Guido Trotter
            val = instance.nics[0].mac
4299 39a02558 Guido Trotter
          else:
4300 39a02558 Guido Trotter
            val = None
4301 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
4302 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
4303 ad24e046 Iustin Pop
          try:
4304 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
4305 ad24e046 Iustin Pop
          except errors.OpPrereqError:
4306 8a23d2d3 Iustin Pop
            val = None
4307 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
4308 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
4309 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
4310 130a6a6f Iustin Pop
        elif field == "tags":
4311 130a6a6f Iustin Pop
          val = list(instance.GetTags())
4312 338e51e8 Iustin Pop
        elif field == "hvparams":
4313 338e51e8 Iustin Pop
          val = i_hv
4314 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
4315 5018a335 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
4316 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
4317 338e51e8 Iustin Pop
        elif field == "beparams":
4318 338e51e8 Iustin Pop
          val = i_be
4319 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
4320 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
4321 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
4322 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
4323 71c1af58 Iustin Pop
          # matches a variable list
4324 71c1af58 Iustin Pop
          st_groups = st_match.groups()
4325 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
4326 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4327 71c1af58 Iustin Pop
              val = len(instance.disks)
4328 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
4329 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
4330 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
4331 3e0cea06 Iustin Pop
              try:
4332 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
4333 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
4334 71c1af58 Iustin Pop
                val = None
4335 71c1af58 Iustin Pop
            else:
4336 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
4337 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
4338 71c1af58 Iustin Pop
            if st_groups[1] == "count":
4339 71c1af58 Iustin Pop
              val = len(instance.nics)
4340 41a776da Iustin Pop
            elif st_groups[1] == "macs":
4341 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
4342 41a776da Iustin Pop
            elif st_groups[1] == "ips":
4343 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
4344 638c6349 Guido Trotter
            elif st_groups[1] == "modes":
4345 638c6349 Guido Trotter
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
4346 638c6349 Guido Trotter
            elif st_groups[1] == "links":
4347 638c6349 Guido Trotter
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
4348 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
4349 638c6349 Guido Trotter
              val = []
4350 638c6349 Guido Trotter
              for nicp in i_nicp:
4351 638c6349 Guido Trotter
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
4352 638c6349 Guido Trotter
                  val.append(nicp[constants.NIC_LINK])
4353 638c6349 Guido Trotter
                else:
4354 638c6349 Guido Trotter
                  val.append(None)
4355 71c1af58 Iustin Pop
            else:
4356 71c1af58 Iustin Pop
              # index-based item
4357 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
4358 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
4359 71c1af58 Iustin Pop
                val = None
4360 71c1af58 Iustin Pop
              else:
4361 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
4362 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
4363 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
4364 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
4365 638c6349 Guido Trotter
                elif st_groups[1] == "mode":
4366 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_MODE]
4367 638c6349 Guido Trotter
                elif st_groups[1] == "link":
4368 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_LINK]
4369 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
4370 638c6349 Guido Trotter
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
4371 638c6349 Guido Trotter
                  if nic_mode == constants.NIC_MODE_BRIDGED:
4372 638c6349 Guido Trotter
                    val = i_nicp[nic_idx][constants.NIC_LINK]
4373 638c6349 Guido Trotter
                  else:
4374 638c6349 Guido Trotter
                    val = None
4375 71c1af58 Iustin Pop
                else:
4376 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
4377 71c1af58 Iustin Pop
          else:
4378 c1ce76bb Iustin Pop
            assert False, ("Declared but unhandled variable parameter '%s'" %
4379 c1ce76bb Iustin Pop
                           field)
4380 a8083063 Iustin Pop
        else:
4381 c1ce76bb Iustin Pop
          assert False, "Declared but unhandled parameter '%s'" % field
4382 a8083063 Iustin Pop
        iout.append(val)
4383 a8083063 Iustin Pop
      output.append(iout)
4384 a8083063 Iustin Pop
4385 a8083063 Iustin Pop
    return output
4386 a8083063 Iustin Pop
4387 a8083063 Iustin Pop
4388 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  Shuts the instance down on its primary node and starts it on the
  first secondary node.  Only valid for network-mirrored (DRBD) disk
  templates.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  # "instance_name" and "ignore_consistency" are mandatory opcode fields
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    # shutdown_timeout is optional on the opcode; fall back to the
    # cluster-wide default when not given
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    """Expand names and declare locks.

    Node locks are declared empty here and recomputed at DeclareLocks
    time, once the instance's node list is known.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    # hooks run on the master and all secondary nodes
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # failover only makes sense for network-mirrored disk templates
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      # a mirrored template without secondaries is a config invariant
      # violation, not a user error
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existance
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    if instance.admin_up:
      feedback_fn("* checking disk consistency between source and target")
      for dev in instance.disks:
        # for drbd, these are drbd over lvm
        if not _CheckDiskConsistency(self, dev, target_node, False):
          if not self.op.ignore_consistency:
            raise errors.OpExecError("Disk %s is degraded on target node,"
                                     " aborting failover." % dev.iv_name)
    else:
      feedback_fn("* not checking disk consistency as instance is not running")

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        # best effort: the source node may be dead, which is exactly the
        # situation failover with ignore_consistency is meant for
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # switch the primary node in the configuration before starting up
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, feedback_fn)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        # roll back disk activation before failing
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
4530 a8083063 Iustin Pop
4531 a8083063 Iustin Pop
4532 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set up the migration tasklet.

    """
    self._ExpandAndLockInstance()

    # node locks are filled in at DeclareLocks time
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    # the actual work is delegated to a tasklet
    migrater = TLMigrateInstance(self, self.op.instance_name,
                                 self.op.live, self.op.cleanup)
    self._migrater = migrater
    self.tasklets = [migrater]

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    inst = self._migrater.instance
    hook_env = _BuildInstanceHookEnvByObject(self, inst)
    hook_env.update({
      "MIGRATE_LIVE": self.op.live,
      "MIGRATE_CLEANUP": self.op.cleanup,
      })
    node_list = [self.cfg.GetMasterNode()] + list(inst.secondary_nodes)
    return hook_env, node_list, node_list
4571 53c776b5 Iustin Pop
4572 3e06e001 Michael Hanselmann
4573 313bcead Iustin Pop
class LUMoveInstance(LogicalUnit):
  """Move an instance by data-copying.

  Shuts the instance down on its current node, creates the disks on
  the target node, copies the data over and starts the instance there.
  Only valid for plain-LV and file-based disks.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
    # FIX: "ignore_consistency" is not in _OP_REQP and was read directly as
    # self.op.ignore_consistency in Exec, so a failed shutdown raised
    # AttributeError instead of taking the intended error path; default it
    # here (as done for shutdown_timeout above)
    self.ignore_consistency = getattr(self.op, "ignore_consistency", False)

  def ExpandNames(self):
    """Expand names and declare locks.

    """
    self._ExpandAndLockInstance()
    target_node = self.cfg.ExpandNodeName(self.op.target_node)
    if target_node is None:
      raise errors.OpPrereqError("Node '%s' not known" %
                                  self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    # keep the explicit target-node lock, add the instance's own nodes
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # only the primary node is involved besides the target
      self._LockInstancesNodes(primary_only=True)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
                                       self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node))

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    # only simple (non-mirrored) disk layouts can be copied
    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        # FIX: the "%d" placeholder was never interpolated, so the user saw
        # a literal "%d" in the error message; supply the disk index
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)

    if instance.admin_up:
      # check memory requirements on the secondary node
      # NOTE(review): message says "failing over" — copy-paste from failover,
      # kept as-is to avoid changing user-visible wording here
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existance
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.shutdown_timeout)
    msg = result.fail_msg
    if msg:
      if self.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        # give back any DRBD minors reserved while creating disks
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
4755 313bcead Iustin Pop
4756 313bcead Iustin Pop
4757 80cb875c Michael Hanselmann
class LUMigrateNode(LogicalUnit):
  """Migrate all instances from a node.

  This builds one migration tasklet per primary instance of the node;
  the tasklets then perform the actual per-instance migrations.

  """
  HPATH = "node-migrate"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name", "live"]
  REQ_BGL = False

  def ExpandNames(self):
    self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if self.op.node_name is None:
      raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name)

    self.needed_locks = {
      locking.LEVEL_NODE: [self.op.node_name],
      }

    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    # One tasklet per primary instance on this node; each performs a
    # regular (non-cleanup) migration with the requested live mode.
    instance_names = []
    migration_tasklets = []

    for inst in _GetNodePrimaryInstances(self.cfg, self.op.node_name):
      logging.debug("Migrating instance %s", inst.name)
      instance_names.append(inst.name)
      migration_tasklets.append(TLMigrateInstance(self, inst.name,
                                                  self.op.live, False))

    self.tasklets = migration_tasklets

    # Declare instance locks
    self.needed_locks[locking.LEVEL_INSTANCE] = instance_names

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    nl = [self.cfg.GetMasterNode()]

    env = {
      "NODE_NAME": self.op.node_name,
      }

    return (env, nl, nl)
4809 80cb875c Michael Hanselmann
4810 80cb875c Michael Hanselmann
4811 3e06e001 Michael Hanselmann
class TLMigrateInstance(Tasklet):
  """Tasklet driving the migration of a drbd8-based instance.

  The migration is implemented by moving the instance's DRBD disks
  through secondary/standalone/dual-master states via RPC calls to the
  two nodes involved; the tasklet can also clean up after a previously
  failed migration (C{cleanup=True}).

  """
  def __init__(self, lu, instance_name, live, cleanup):
    """Initializes this class.

    @param lu: the logical unit on whose behalf the tasklet runs
    @type instance_name: string
    @param instance_name: name of the instance to migrate (expanded in
        L{CheckPrereq})
    @type live: boolean
    @param live: whether to request a live migration from the hypervisor
    @type cleanup: boolean
    @param cleanup: if True, only recover from a previously failed
        migration instead of starting a new one

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.live = live
    self.cleanup = cleanup

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    @raise errors.OpPrereqError: if the instance is unknown, not based
        on drbd8, or (for a real migration) reported non-migratable by
        the source node
    @raise errors.ConfigurationError: if a drbd8 instance has no
        secondary node

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.instance_name)

    # only drbd8-backed instances can be migrated by this tasklet
    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    # beparams with cluster defaults filled in, for the memory check below
    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existance
    _CheckInstanceBridgesExist(self, instance, node=target_node)

    if not self.cleanup:
      # a real migration additionally needs a non-drained target and a
      # source hypervisor that agrees to migrate this instance
      _CheckNodeNotDrained(self, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      result.Raise("Can't migrate, please use failover", prereq=True)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.  Loops until all nodes
    report their disks as synchronized, printing the progress of the
    least advanced node in between polls.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        nres.Raise("Cannot resync disks on node %s" % node)
        # payload is a (done, sync_percent) pair per node
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          # report the least advanced node as the overall progress
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    Closes the instance's block devices on the given node.

    @param node: the node whose disks should be demoted

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    result.Raise("Cannot change disk to secondary on node %s" % node)

  def _GoStandalone(self):
    """Disconnect from the network.

    Puts the instance's disks on both nodes into standalone mode.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      nres.Raise("Cannot disconnect disks node %s" % node)

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    @type multimaster: boolean
    @param multimaster: if True, reattach the disks in dual-master mode
        (needed while the migration is in progress), otherwise in
        single-master mode

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      nres.Raise("Cannot change disks config on node %s" % node)

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    @raise errors.OpExecError: if the instance runs on both nodes or
        on neither of them

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise("Can't contact node %s" % node)

    runningon_source = instance.name in ins_l[source_node].payload
    runningon_target = instance.name in ins_l[target_node].payload

    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance, self.feedback_fn)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    # demote whichever node is not running the instance, then resync
    # and reconnect the disks in single-master mode
    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore here errors, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    Best-effort: failures are only logged, not re-raised, since we are
    already in the error path of a failed migration.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.lu.LogWarning("Migration failed and I can't reconnect the"
                         " drives: error '%s'\n"
                         "Please look and recover the instance status" %
                         str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    # the trailing False selects the abort path of finalize_migration
    # (contrast with the True passed on success in _ExecMigration)
    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.fail_msg
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we stil have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    @raise errors.OpExecError: on any failure during the migration
        steps (after an attempt to abort and revert the disk status)

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.fail_msg
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.fail_msg
    if msg:
      # pre-migration failed: abort on the target and try to restore
      # the disks to single-master before giving up
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    # NOTE(review): fixed 10s delays surround the actual migration; the
    # reason is not apparent from this code - confirm before changing
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.live)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance, self.feedback_fn)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.fail_msg
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    # demote the old primary and reconnect the disks in single-master mode
    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    Computes the source/target node data and dispatches either to the
    cleanup path or to the actual migration, depending on
    C{self.cleanup}.

    """
    feedback_fn("Migrating instance %s" % self.instance.name)

    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    # DRBD traffic goes over the secondary IPs of the two nodes
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }

    if self.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()
5139 53c776b5 Iustin Pop
5140 53c776b5 Iustin Pop
5141 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
5142 428958aa Iustin Pop
                    info, force_open):
5143 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
5144 a8083063 Iustin Pop

5145 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
5146 a8083063 Iustin Pop
  all its children.
5147 a8083063 Iustin Pop

5148 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
5149 a8083063 Iustin Pop

5150 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
5151 428958aa Iustin Pop
  @param node: the node on which to create the device
5152 428958aa Iustin Pop
  @type instance: L{objects.Instance}
5153 428958aa Iustin Pop
  @param instance: the instance which owns the device
5154 428958aa Iustin Pop
  @type device: L{objects.Disk}
5155 428958aa Iustin Pop
  @param device: the device to create
5156 428958aa Iustin Pop
  @type force_create: boolean
5157 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
5158 428958aa Iustin Pop
      will be change to True whenever we find a device which has
5159 428958aa Iustin Pop
      CreateOnSecondary() attribute
5160 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5161 428958aa Iustin Pop
      (this will be represented as a LVM tag)
5162 428958aa Iustin Pop
  @type force_open: boolean
5163 428958aa Iustin Pop
  @param force_open: this parameter will be passes to the
5164 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5165 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
5166 428958aa Iustin Pop
      the child assembly and the device own Open() execution
5167 428958aa Iustin Pop

5168 a8083063 Iustin Pop
  """
5169 a8083063 Iustin Pop
  if device.CreateOnSecondary():
5170 428958aa Iustin Pop
    force_create = True
5171 796cab27 Iustin Pop
5172 a8083063 Iustin Pop
  if device.children:
5173 a8083063 Iustin Pop
    for child in device.children:
5174 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
5175 428958aa Iustin Pop
                      info, force_open)
5176 a8083063 Iustin Pop
5177 428958aa Iustin Pop
  if not force_create:
5178 796cab27 Iustin Pop
    return
5179 796cab27 Iustin Pop
5180 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
5181 de12473a Iustin Pop
5182 de12473a Iustin Pop
5183 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
5184 de12473a Iustin Pop
  """Create a single block device on a given node.
5185 de12473a Iustin Pop

5186 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
5187 de12473a Iustin Pop
  created in advance.
5188 de12473a Iustin Pop

5189 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
5190 de12473a Iustin Pop
  @param node: the node on which to create the device
5191 de12473a Iustin Pop
  @type instance: L{objects.Instance}
5192 de12473a Iustin Pop
  @param instance: the instance which owns the device
5193 de12473a Iustin Pop
  @type device: L{objects.Disk}
5194 de12473a Iustin Pop
  @param device: the device to create
5195 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
5196 de12473a Iustin Pop
      (this will be represented as a LVM tag)
5197 de12473a Iustin Pop
  @type force_open: boolean
5198 de12473a Iustin Pop
  @param force_open: this parameter will be passes to the
5199 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
5200 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
5201 de12473a Iustin Pop
      the child assembly and the device own Open() execution
5202 de12473a Iustin Pop

5203 de12473a Iustin Pop
  """
5204 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
5205 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
5206 428958aa Iustin Pop
                                       instance.name, force_open, info)
5207 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
5208 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
5209 a8083063 Iustin Pop
  if device.physical_id is None:
5210 0959c824 Iustin Pop
    device.physical_id = result.payload
5211 a8083063 Iustin Pop
5212 a8083063 Iustin Pop
5213 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
5214 923b1523 Iustin Pop
  """Generate a suitable LV name.
5215 923b1523 Iustin Pop

5216 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
5217 923b1523 Iustin Pop

5218 923b1523 Iustin Pop
  """
5219 923b1523 Iustin Pop
  results = []
5220 923b1523 Iustin Pop
  for val in exts:
5221 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
5222 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
5223 923b1523 Iustin Pop
  return results
5224 923b1523 Iustin Pop
5225 923b1523 Iustin Pop
5226 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  Allocates a port and a shared secret from the cluster configuration
  and builds a DRBD8 disk object backed by two LV children (data and
  metadata).

  @param lu: the lu on whose behalf we execute
  @param primary: the primary node for the device
  @param secondary: the secondary node for the device
  @param size: size of the data volume
  @param names: pair of LV names, C{names[0]} for data and C{names[1]}
      for metadata
  @param iv_name: the instance-visible name of the device
  @param p_minor: DRBD minor on the primary node
  @param s_minor: DRBD minor on the secondary node
  @return: the L{objects.Disk} describing the new DRBD8 device

  """
  new_port = lu.cfg.AllocatePort()
  vg_name = lu.cfg.GetVGName()
  secret = lu.cfg.GenerateDRBDSecret()

  data_child = objects.Disk(dev_type=constants.LD_LV, size=size,
                            logical_id=(vg_name, names[0]))
  # the metadata child uses a fixed size of 128
  meta_child = objects.Disk(dev_type=constants.LD_LV, size=128,
                            logical_id=(vg_name, names[1]))

  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, new_port,
                                  p_minor, s_minor, secret),
                      children=[data_child, meta_child],
                      iv_name=iv_name)
5245 a1f445d3 Iustin Pop
5246 7c0d6283 Michael Hanselmann
5247 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disks = []

  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                      for i in range(len(disk_info))])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disks.append(objects.Disk(dev_type=constants.LD_LV,
                                size=disk["size"],
                                logical_id=(vgname, names[idx]),
                                iv_name="disk/%d" % disk_index,
                                mode=disk["mode"]))
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # one (primary, secondary) minor pair per disk
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    # two LV names per disk: data and metadata, interleaved
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(len(disk_info))]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      drbd_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      drbd_dev.mode = disk["mode"]
      disks.append(drbd_dev)
  elif template_name == constants.DT_FILE:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disks.append(objects.Disk(dev_type=constants.LD_FILE,
                                size=disk["size"],
                                iv_name="disk/%d" % disk_index,
                                logical_id=(file_driver,
                                            "%s/disk%d" % (file_storage_dir,
                                                           disk_index)),
                                mode=disk["mode"]))
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
5311 a8083063 Iustin Pop
5312 a8083063 Iustin Pop
5313 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
5314 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
5315 3ecf6786 Iustin Pop

5316 3ecf6786 Iustin Pop
  """
5317 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
5318 a0c3fea1 Michael Hanselmann
5319 a0c3fea1 Michael Hanselmann
5320 621b7678 Iustin Pop
def _CreateDisks(lu, instance, to_skip=None, target_node=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node: string
  @param target_node: if passed, overrides the target node for creation
  @raise errors.OpExecError: via C{result.Raise} if the file storage
      directory cannot be created

  """
  info = _GetInstanceInfoText(instance)
  # without an override we create on the primary and all of the
  # instance's nodes; with an override, only on the given node
  if target_node is None:
    pnode = instance.primary_node
    all_nodes = instance.all_nodes
  else:
    pnode = target_node
    all_nodes = [pnode]

  if instance.disk_template == constants.DT_FILE:
    # all file-based disks live under one directory, taken from the
    # first disk's logical id (driver, path)
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for idx, device in enumerate(instance.disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in all_nodes:
      # only on the primary node do we force creation and mark the
      # device as being on its primary (f_create doubles as both flags)
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
5363 a8083063 Iustin Pop
5364 a8083063 Iustin Pop
5365 621b7678 Iustin Pop
def _RemoveDisks(lu, instance, target_node=None):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node: string
  @param target_node: used to override the node on which to remove the disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  for device in instance.disks:
    if target_node:
      # with an override, remove only on that node
      edata = [(target_node, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node, disk in edata:
      lu.cfg.SetDiskID(disk, node)
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
      if msg:
        # best-effort: warn and carry on with the remaining devices
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, msg)
        all_result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node:
      tgt = target_node
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      # report the node we actually tried (tgt); previously this logged
      # the primary node even when target_node overrode it
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, tgt, result.fail_msg)
      all_result = False

  return all_result
5412 a8083063 Iustin Pop
5413 a8083063 Iustin Pop
5414 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  # Required free disk space as a function of disk and swap space
  if disk_template == constants.DT_PLAIN:
    return sum(d["size"] for d in disks)
  if disk_template == constants.DT_DRBD8:
    # 128 MB are added for drbd metadata for each disk
    return sum(d["size"] + 128 for d in disks)
  if disk_template in (constants.DT_DISKLESS, constants.DT_FILE):
    # these templates need no space in the volume group
    return None

  raise errors.ProgrammerError("Disk template '%s' size requirement"
                               " is unknown" % disk_template)
5432 e2fe6369 Iustin Pop
5433 e2fe6369 Iustin Pop
5434 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstract the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames, hvname, hvparams)
  for node in nodenames:
    info = hvinfo[node]
    # results from offline nodes are ignored
    if not info.offline:
      info.Raise("Hypervisor parameter validation failed on node %s" % node)
5459 74409b12 Iustin Pop
5460 74409b12 Iustin Pop
5461 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
5462 a8083063 Iustin Pop
  """Create an instance.
5463 a8083063 Iustin Pop

5464 a8083063 Iustin Pop
  """
5465 a8083063 Iustin Pop
  HPATH = "instance-add"
5466 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5467 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
5468 08db7c5c Iustin Pop
              "mode", "start",
5469 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
5470 338e51e8 Iustin Pop
              "hvparams", "beparams"]
5471 7baf741d Guido Trotter
  REQ_BGL = False
5472 7baf741d Guido Trotter
5473 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Expands and checks one node name.

    @raise errors.OpPrereqError: if the name does not resolve to a
        known node

    """
    expanded = self.cfg.ExpandNodeName(node)
    if expanded is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return expanded
5481 7baf741d Guido Trotter
5482 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    This also performs all the opcode-level validation that does not
    need cluster-wide information: creation mode, disk template,
    hypervisor/backend parameter syntax, instance name, NIC and disk
    specifications, and the node/iallocator selection.

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster-wide hypervisor if none was given
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    # remembered for BuildHooksEnv and CheckPrereq
    self.hv_full = filled_hvp

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for idx, nic in enumerate(self.op.nics):
      nic_mode_req = nic.get("mode", None)
      nic_mode = nic_mode_req
      if nic_mode is None:
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

      # in routed mode, for the first nic, the default ip is 'auto'
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
        default_ip_mode = constants.VALUE_AUTO
      else:
        default_ip_mode = constants.VALUE_NONE

      # ip validity checks
      ip = nic.get("ip", default_ip_mode)
      if ip is None or ip.lower() == constants.VALUE_NONE:
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        # 'auto' means: use the instance's own resolved address
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # TODO: check the ip for uniqueness !!
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
        raise errors.OpPrereqError("Routed nic mode requires an ip address")

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
        else:
          # or validate/reserve the current one
          if self.cfg.IsMacInUse(mac):
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % mac)

      # bridge verification; 'bridge' is the legacy spelling of 'link'
      bridge = nic.get("bridge", None)
      link = nic.get("link", None)
      if bridge and link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time")
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
      elif bridge:
        link = bridge

      # only explicitly-given values are stored in nicparams; the
      # cluster defaults are merged in just for the syntax check
      nicparams = {}
      if nic_mode_req:
        nicparams[constants.NIC_MODE] = nic_mode_req
      if link:
        nicparams[constants.NIC_LINK] = link

      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
                                      nicparams)
      objects.NIC.CheckParameterSyntax(check_params)
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # node selection is deferred to the allocator, so we must lock
      # all nodes
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # without a source node, the export must be searched for on all
        # nodes, hence the global lock; a relative path is resolved later
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
      self.op.force_variant = getattr(self.op, "force_variant", False)
5682 a8083063 Iustin Pop
5683 538475ca Iustin Pop
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success this fills in self.op.pnode (and self.op.snode when two
    nodes were requested) from the allocator's answer.

    """
    nic_dicts = [n.ToDict() for n in self.nics]
    ial = IAllocator(self.cfg, self.rpc,
                     mode=constants.IALLOCATOR_MODE_ALLOC,
                     name=self.op.instance_name,
                     disk_template=self.op.disk_template,
                     tags=[],
                     os=self.op.os_type,
                     vcpus=self.be_full[constants.BE_VCPUS],
                     mem_size=self.be_full[constants.BE_MEMORY],
                     disks=self.disks,
                     nics=nic_dicts,
                     hypervisor=self.op.hypervisor,
                     )

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))

    self.op.pnode = ial.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(ial.nodes))
    if ial.required_nodes == 2:
      self.op.snode = ial.nodes[1]
5718 538475ca Iustin Pop
5719 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"ADD_MODE": self.op.mode}
    if self.op.mode == constants.INSTANCE_IMPORT:
      # extra information only meaningful for imports
      env.update({
        "SRC_NODE": self.op.src_node,
        "SRC_PATH": self.op.src_path,
        "SRC_IMAGES": self.src_images,
        })

    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=_NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d["size"], d["mode"]) for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
    ))

    # hooks run on the master, the primary and all secondaries
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return env, nl, nl
5752 a8083063 Iustin Pop
5753 a8083063 Iustin Pop
5754 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Validates everything needed before the instance can be created:
    cluster storage support, the import source (when importing), IP
    conflicts, primary/secondary node status, free disk space, the
    hypervisor parameters, the requested OS and free memory.

    Raises errors.OpPrereqError when any prerequisite is not met.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_node is None:
        # relative export path: search all locked nodes for an export
        # with the given name
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
        exp_list = self.rpc.call_export_list(locked_nodes)
        found = False
        for node in exp_list:
          if exp_list[node].fail_msg:
            # skip nodes which failed to answer; the export may still be
            # found on another node
            continue
          if src_path in exp_list[node].payload:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise("No export or invalid export found in dir %s" % src_path)

      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          # no dump for this disk index; marked with False so the import
          # code can skip it
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          # Only NICs present in the export carry a saved MAC.  The old
          # condition "exp_nic_count >= idx" was off by one: it accepted
          # idx == exp_nic_count, for which the 'nic%d_mac' option does
          # not exist, so the .get() below crashed with NoOptionError
          # when the new instance had more NICs than the export.
          if nic.mac == constants.VALUE_AUTO and idx < exp_nic_count:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise("Cannot get current information from node %s" % node)
        info = info.payload
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > vg_free:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, vg_free, req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise("OS '%s' not in supported os list for primary node %s" %
                 (self.op.os_type, pnode.name), prereq=True)
    if not self.op.force_variant:
      _CheckOSVariant(result.payload, self.op.os_type)

    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    self.dry_run_result = list(nodenames)
5924 08896026 Iustin Pop
5925 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # some hypervisors require a network port reservation
    if self.op.hypervisor in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # os.path.join does not accept None arguments, so map a missing
    # file storage directory to the empty string
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))

    disks = _GenerateDiskTemplate(self, self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.disks,
                                  file_storage_dir, self.op.file_driver, 0)

    # the instance is created stopped (admin_up=False); it is marked as
    # running further down, only if self.op.start is set
    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        # give back the reserved DRBD minors and re-raise the original error
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      # ...except the source node, which is still needed for the import below
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # roll everything back: remove the disks and the config entry
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
        result.Raise("Could not add os for instance %s"
                     " on node %s" % (instance, pnode_name))
      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        import_result = self.rpc.call_instance_os_import(
          pnode_name, iobj, self.op.src_node, self.src_images,
          self.cfg.GetClusterName())
        msg = import_result.fail_msg
        if msg:
          # a failed image import is only a warning; the admin can re-run
          # the OS scripts manually
          self.LogWarning("Error while importing the disk images for instance"
                          " %s on node %s: %s" % (instance, pnode_name, msg))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      iobj.admin_up = True
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)
6055 08896026 Iustin Pop
6056 a8083063 Iustin Pop
6057 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # only the instance lock is needed
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    # ask the primary node which instances it currently runs
    node_insts = self.rpc.call_instance_list([node],
                                             [instance.hypervisor])[node]
    node_insts.Raise("Can't get node information from %s" % node)

    if instance.name not in node_insts.payload:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logging.debug("Connecting to console of %s on %s", instance.name, node)

    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    cluster = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    hvparams = cluster.FillHV(instance)
    beparams = cluster.FillBE(instance)
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)

    # build ssh cmdline
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
6108 a8083063 Iustin Pop
6109 a8083063 Iustin Pop
6110 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mode", "disks"]
  REQ_BGL = False

  def CheckArguments(self):
    # default the optional opcode attributes, then delegate the actual
    # parameter validation to the tasklet
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None
    if not hasattr(self.op, "iallocator"):
      self.op.iallocator = None

    TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
                                  self.op.iallocator)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      # the allocator may pick any node, so lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    elif self.op.remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)

      self.op.remote_node = remote_node

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    else:
      # neither allocator nor explicit node: lock only the instance's
      # own nodes (filled in by DeclareLocks)
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
                                   self.op.iallocator, self.op.remote_node,
                                   self.op.disks)

    self.tasklets = [self.replacer]

  def DeclareLocks(self, level):
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if (level == locking.LEVEL_NODE and
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.replacer.instance
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))

    nl = [self.cfg.GetMasterNode(), instance.primary_node]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return env, nl, nl
6186 a8083063 Iustin Pop
6187 2bb5c911 Michael Hanselmann
6188 7ffc5a86 Michael Hanselmann
class LUEvacuateNode(LogicalUnit):
  """Relocate the secondary instances from a node.

  """
  HPATH = "node-evacuate"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    # default the optional opcode attributes, then validate them via the
    # tasklet (evacuation always uses secondary-change mode)
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None
    if not hasattr(self.op, "iallocator"):
      self.op.iallocator = None

    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
                                  self.op.remote_node,
                                  self.op.iallocator)

  def ExpandNames(self):
    self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if self.op.node_name is None:
      raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name)

    self.needed_locks = {}

    # Declare node locks
    if self.op.iallocator is not None:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    elif self.op.remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)

      self.op.remote_node = remote_node

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    else:
      # evacuation needs either an allocator or an explicit target node
      raise errors.OpPrereqError("Invalid parameters")

    # Create tasklets for replacing disks for all secondary instances on this
    # node
    inst_names = []
    repl_tasklets = []

    for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
      logging.debug("Replacing disks for instance %s", inst.name)
      inst_names.append(inst.name)

      replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
                                self.op.iallocator, self.op.remote_node, [])
      repl_tasklets.append(replacer)

    self.tasklets = repl_tasklets
    self.instance_names = inst_names

    # Declare instance locks
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names

  def DeclareLocks(self, level):
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if (level == locking.LEVEL_NODE and
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {"NODE_NAME": self.op.node_name}

    nl = [self.cfg.GetMasterNode()]

    if self.op.remote_node is not None:
      env["NEW_SECONDARY"] = self.op.remote_node
      nl.append(self.op.remote_node)

    return (env, nl, nl)
6279 7ffc5a86 Michael Hanselmann
6280 7ffc5a86 Michael Hanselmann
6281 c68174b6 Michael Hanselmann
class TLReplaceDisks(Tasklet):
6282 2bb5c911 Michael Hanselmann
  """Replaces disks for an instance.
6283 2bb5c911 Michael Hanselmann

6284 2bb5c911 Michael Hanselmann
  Note: Locking is not within the scope of this class.
6285 2bb5c911 Michael Hanselmann

6286 2bb5c911 Michael Hanselmann
  """
6287 2bb5c911 Michael Hanselmann
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
               disks):
    """Initializes this class.

    Stores the caller-supplied parameters and initializes the runtime
    attributes which are filled in later by L{CheckPrereq}.

    """
    Tasklet.__init__(self, lu)

    # Parameters given by the caller
    self.instance_name = instance_name
    self.mode = mode
    self.iallocator_name = iallocator_name
    self.remote_node = remote_node
    self.disks = disks

    # Runtime data, computed by CheckPrereq
    self.instance = self.new_node = self.target_node = None
    self.other_node = self.remote_node_info = self.node_secondary_ip = None
6308 2bb5c911 Michael Hanselmann
6309 2bb5c911 Michael Hanselmann
  @staticmethod
  def CheckArguments(mode, remote_node, iallocator):
    """Validate the combination of disk-replacement arguments.

    A new secondary node and an iallocator script are mutually exclusive,
    and either is only meaningful when the secondary is being changed.

    @raise errors.OpPrereqError: when the combination is invalid

    """
    have_node = remote_node is not None
    have_iallocator = iallocator is not None

    if mode == constants.REPLACE_DISK_CHG:
      if not (have_node or have_iallocator):
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")

      if have_node and have_iallocator:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")

    elif have_node or have_iallocator:
      # Not replacing the secondary
      raise errors.OpPrereqError("The iallocator and new node options can"
                                 " only be used when changing the"
                                 " secondary node")
6330 2bb5c911 Michael Hanselmann
6331 2bb5c911 Michael Hanselmann
  @staticmethod
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
    """Compute a new secondary node using an IAllocator.

    @param lu: the logical unit on whose behalf the allocator runs
    @param iallocator_name: name of the iallocator script to run
    @param instance_name: the instance whose secondary is being replaced
    @param relocate_from: nodes to relocate away from
    @return: the name of the newly-selected secondary node
    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    ial = IAllocator(lu.cfg, lu.rpc,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=instance_name,
                     relocate_from=relocate_from)

    ial.Run(iallocator_name)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
                                 " %s" % (iallocator_name, ial.info))

    if len(ial.nodes) != ial.required_nodes:
      # Note: the format tuple previously omitted iallocator_name, which
      # made this raise TypeError instead of the intended OpPrereqError
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (iallocator_name, len(ial.nodes),
                                  ial.required_nodes))

    remote_node_name = ial.nodes[0]

    lu.LogInfo("Selected new secondary for instance '%s': %s",
               instance_name, remote_node_name)

    return remote_node_name
6358 2bb5c911 Michael Hanselmann
6359 942be002 Michael Hanselmann
  def _FindFaultyDisks(self, node_name):
    """Wrapper over L{_FindFaultyInstanceDisks} for this tasklet's instance.

    """
    faulty = _FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
                                      node_name, True)
    return faulty
6362 942be002 Michael Hanselmann
6363 2bb5c911 Michael Hanselmann
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, is DRBD8-based and
    has exactly one secondary node; depending on the replacement mode it
    then computes the set of disks to replace and the target/other/new
    nodes involved.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.instance_name

    # This tasklet only supports DRBD8-based instances
    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    secondary_node = instance.secondary_nodes[0]

    # The new secondary comes either directly from the opcode or is
    # computed by the iallocator script, never both (see CheckArguments)
    if self.iallocator_name is None:
      remote_node = self.remote_node
    else:
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
                                       instance.name, instance.secondary_nodes)

    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None

    if remote_node == self.instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")

    if remote_node == secondary_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    # In the "auto" and "change secondary" modes the disk set is computed
    # here rather than given by the caller
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
                                    constants.REPLACE_DISK_CHG):
      raise errors.OpPrereqError("Cannot specify disks to be replaced")

    if self.mode == constants.REPLACE_DISK_AUTO:
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
      faulty_secondary = self._FindFaultyDisks(secondary_node)

      if faulty_primary and faulty_secondary:
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
                                   " one node and can not be repaired"
                                   " automatically" % self.instance_name)

      if faulty_primary:
        self.disks = faulty_primary
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]
      elif faulty_secondary:
        self.disks = faulty_secondary
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]
      else:
        # No faulty disks found anywhere; nothing to replace
        self.disks = []
        check_nodes = []

    else:
      # Non-automatic modes
      if self.mode == constants.REPLACE_DISK_PRI:
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_SEC:
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_CHG:
        self.new_node = remote_node
        self.other_node = instance.primary_node
        self.target_node = secondary_node
        check_nodes = [self.new_node, self.other_node]

        # The new secondary must not be drained
        _CheckNodeNotDrained(self.lu, remote_node)

      else:
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
                                     self.mode)

      # If not specified all disks should be replaced
      if not self.disks:
        self.disks = range(len(self.instance.disks))

    for node in check_nodes:
      _CheckNodeOnline(self.lu, node)

    # Check whether disks are valid
    for disk_idx in self.disks:
      instance.FindDisk(disk_idx)

    # Get secondary node IP addresses
    node_2nd_ip = {}

    for node_name in [self.target_node, self.other_node, self.new_node]:
      if node_name is not None:
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip

    self.node_secondary_ip = node_2nd_ip
6475 a9e0c397 Iustin Pop
6476 c68174b6 Michael Hanselmann
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    if not self.disks:
      feedback_fn("No disks need replacement")
      return

    feedback_fn("Replacing disk(s) %s for %s" %
                (", ".join(map(str, self.disks)), self.instance.name))

    # A down instance needs its disks activated before we can touch them
    # (and deactivated again afterwards)
    activate_disks = not self.instance.admin_up

    if activate_disks:
      _StartInstanceDisks(self.lu, self.instance, True)

    try:
      # A new node means the secondary is being replaced; otherwise only
      # the disks on the existing nodes are replaced
      if self.new_node is None:
        handler = self._ExecDrbd8DiskOnly
      else:
        handler = self._ExecDrbd8Secondary

      return handler(feedback_fn)
    finally:
      if activate_disks:
        _SafeShutdownInstanceDisks(self.lu, self.instance)
6508 2bb5c911 Michael Hanselmann
6509 2bb5c911 Michael Hanselmann
  def _CheckVolumeGroup(self, nodes):
    """Verify that the cluster volume group exists on all given nodes.

    @raise errors.OpExecError: if listing fails or the VG is missing

    """
    self.lu.LogInfo("Checking volume groups")

    vg_name = self.cfg.GetVGName()

    # Query every involved node for its volume groups
    node_results = self.rpc.call_vg_list(nodes)
    if not node_results:
      raise errors.OpExecError("Can't list volume groups on the nodes")

    for node_name in nodes:
      node_res = node_results[node_name]
      node_res.Raise("Error checking node %s" % node_name)
      if vg_name not in node_res.payload:
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
                                 (vg_name, node_name))
6525 2bb5c911 Michael Hanselmann
6526 2bb5c911 Michael Hanselmann
  def _CheckDisksExistence(self, nodes):
    """Check that each disk selected for replacement exists on the nodes.

    @raise errors.OpExecError: if a disk cannot be found

    """
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        # Only the disks selected for replacement are checked
        continue

      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
        self.cfg.SetDiskID(dev, node)

        result = self.rpc.call_blockdev_find(node, dev)
        msg = result.fail_msg

        if not msg and result.payload:
          # Device found, nothing to report
          continue

        if not msg:
          msg = "disk not found"
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                 (idx, node, msg))
6544 cff90b79 Iustin Pop
6545 2bb5c911 Michael Hanselmann
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
    """Ensure the disks under replacement are consistent on node_name.

    @raise errors.OpExecError: if any checked disk is degraded

    """
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
                      (idx, node_name))

      consistent = _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
                                         ldisk=ldisk)
      if not consistent:
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                 " replace disks for instance %s" %
                                 (node_name, self.instance.name))
6558 2bb5c911 Michael Hanselmann
6559 2bb5c911 Michael Hanselmann
  def _CreateNewStorage(self, node_name):
    """Create replacement LVs (data and metadata) on the given node.

    For every disk selected for replacement a pair of new logical
    volumes is created on C{node_name}.

    @return: dict mapping each DRBD device's iv_name to a tuple of
        (drbd disk object, list of old LVs, list of new LVs)

    """
    vgname = self.cfg.GetVGName()
    iv_names = {}

    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))

      self.cfg.SetDiskID(dev, node_name)

      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
      names = _GenerateUniqueNames(self.lu, lv_names)

      # The data LV matches the existing disk's size; the metadata LV is
      # hard-coded at 128 — presumably the DRBD meta size in MiB, TODO confirm
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))

      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)

      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    return iv_names
6589 2bb5c911 Michael Hanselmann
6590 2bb5c911 Michael Hanselmann
  def _CheckDevices(self, node_name, iv_names):
    """Verify the changed DRBD devices are present and healthy on node_name.

    @raise errors.OpExecError: if a device is missing or degraded

    """
    for iv_name, (dev, _old_lvs, _new_lvs) in iv_names.iteritems():
      self.cfg.SetDiskID(dev, node_name)

      result = self.rpc.call_blockdev_find(node_name, dev)
      msg = result.fail_msg

      if msg or not result.payload:
        # Either the RPC failed or the device simply was not found
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (iv_name, msg or "disk not found"))

      if result.payload.is_degraded:
        raise errors.OpExecError("DRBD device %s is degraded!" % iv_name)
6605 2bb5c911 Michael Hanselmann
6606 2bb5c911 Michael Hanselmann
  def _RemoveOldStorage(self, node_name, iv_names):
    """Best-effort removal of the replaced logical volumes on node_name.

    Failures are only logged as warnings; at this point the disk
    replacement itself has already completed.

    """
    for iv_name, (_, old_lvs, _) in iv_names.iteritems():
      self.lu.LogInfo("Remove logical volumes for %s" % iv_name)

      for old_lv in old_lvs:
        self.cfg.SetDiskID(old_lv, node_name)

        remove_result = self.rpc.call_blockdev_remove(node_name, old_lv)
        if remove_result.fail_msg:
          self.lu.LogWarning("Can't remove old LV: %s" % remove_result.fail_msg,
                             hint="remove unused LVs manually")
6617 2bb5c911 Michael Hanselmann
6618 a4eae71f Michael Hanselmann
  def _ExecDrbd8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for DRBD 8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    @param feedback_fn: callback used to report progress to the caller

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.other_node, self.target_node])
    self._CheckVolumeGroup([self.target_node, self.other_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.other_node,
                                self.other_node == self.instance.primary_node,
                                False)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    iv_names = self._CreateNewStorage(self.target_node)

    # Step: for each lv, detach+rename*2+attach
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)

      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
                                                     old_lvs)
      result.Raise("Can't detach drbd from local storage on node"
                   " %s for device %s" % (self.target_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())

      def ren_fn(d, suff):
        # New physical_id for a renamed (replaced) LV
        return (d.physical_id[0],
                d.physical_id[1] + "_replaced-%s" % suff)

      # Build the rename list based on what LVs exist on the node
      rename_old_to_new = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
        if not result.fail_msg and result.payload:
          # device exists
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))

      self.lu.LogInfo("Renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_old_to_new)
      result.Raise("Can't rename old LVs on node %s" % self.target_node)

      # Now we rename the new LVs to the old LVs
      self.lu.LogInfo("Renaming the new LVs on the target node")
      rename_new_to_old = [(new, old.physical_id)
                           for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_new_to_old)
      result.Raise("Can't rename new LVs on node %s" % self.target_node)

      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        self.cfg.SetDiskID(new, self.target_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        self.cfg.SetDiskID(disk, self.target_node)

      # Now that the new lvs have the old name, we can add them to the device
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
                                                  new_lvs)
      msg = result.fail_msg
      if msg:
        # Attach failed; try to remove the freshly-created LVs again
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
                                               new_lv).fail_msg
          if msg2:
            # Note: the hint previously read "logicalvolumes" due to a
            # missing space in the implicit string concatenation
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("cleanup manually the unused logical"
                                     " volumes"))
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

      dev.children = new_lvs

      self.cfg.Update(self.instance, feedback_fn)

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(5, steps_total, "Sync devices")
    _WaitForSync(self.lu, self.instance, unlock=True)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    self.lu.LogStep(6, steps_total, "Removing old storage")
    self._RemoveOldStorage(self.target_node, iv_names)
6740 a9e0c397 Iustin Pop
6741 a4eae71f Michael Hanselmann
  def _ExecDrbd8Secondary(self, feedback_fn):
    """Replace the secondary node for DRBD 8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    @param feedback_fn: feedback function, forwarded to the configuration
        update calls

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.instance.primary_node])
    self._CheckVolumeGroup([self.instance.primary_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.instance.primary_node, True, True)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                      (self.new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
                        _GetInstanceInfoText(self.instance), False)

    # Step 4: drbd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for _ in self.instance.disks],
                                        self.instance.name)
    # lazy formatting: only rendered if debug logging is enabled
    logging.debug("Allocated minors %r", minors)

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
                      (self.new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size)
      try:
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
                              _GetInstanceInfoText(self.instance), False)
      except errors.GenericError:
        # undo the minor reservation before propagating the failure
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise

    # We have new devices, shutdown the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
      self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
      if msg:
        # FIX: the two implicitly-concatenated literals used to render
        # "on oldnode:" (missing space)
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
                                               self.node_secondary_ip,
                                               self.instance.disks)\
                                              [self.instance.primary_node]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance, feedback_fn)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
                                           self.instance.disks,
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           to_node, msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(5, steps_total, "Sync devices")
    _WaitForSync(self.lu, self.instance, unlock=True)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    self.lu.LogStep(6, steps_total, "Removing old storage")
    self._RemoveOldStorage(self.target_node, iv_names)
6885 a9e0c397 Iustin Pop
6886 a8083063 Iustin Pop
6887 76aef8fc Michael Hanselmann
class LURepairNodeStorage(NoHooksLU):
  """Repairs the volume group on a node.

  """
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    """Expand and validate the node name from the opcode.

    """
    expanded_name = self.cfg.ExpandNodeName(self.op.node_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = expanded_name

  def ExpandNames(self):
    """Only the target node needs to be locked.

    """
    self.needed_locks = {locking.LEVEL_NODE: [self.op.node_name]}

  def _CheckFaultyDisks(self, instance, node_name):
    """Abort if the given instance has faulty disks on the given node.

    """
    faulty = _FindFaultyInstanceDisks(self.cfg, self.rpc, instance,
                                      node_name, True)
    if faulty:
      raise errors.OpPrereqError("Instance '%s' has faulty disks on"
                                 " node '%s'" % (instance.name, node_name))

  def CheckPrereq(self):
    """Check prerequisites.

    """
    storage_type = self.op.storage_type

    valid_ops = constants.VALID_STORAGE_OPERATIONS.get(storage_type, [])
    if constants.SO_FIX_CONSISTENCY not in valid_ops:
      raise errors.OpPrereqError("Storage units of type '%s' can not be"
                                 " repaired" % storage_type)

    # Check whether any instance on this node has faulty disks
    for inst in _GetNodeInstances(self.cfg, self.op.node_name):
      other_nodes = set(inst.all_nodes)
      other_nodes.discard(self.op.node_name)
      for other_node_name in other_nodes:
        self._CheckFaultyDisks(inst, other_node_name)

  def Exec(self, feedback_fn):
    """Run the consistency-fix operation on the target node.

    """
    feedback_fn("Repairing storage unit '%s' on %s ..." %
                (self.op.name, self.op.node_name))

    st_args = _GetStorageTypeArgs(self.cfg, self.op.storage_type)
    result = self.rpc.call_storage_execute(self.op.node_name,
                                           self.op.storage_type, st_args,
                                           self.op.name,
                                           constants.SO_FIX_CONSISTENCY)
    result.Raise("Failed to repair storage unit '%s' on %s" %
                 (self.op.name, self.op.node_name))
6941 76aef8fc Michael Hanselmann
6942 76aef8fc Michael Hanselmann
6943 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    """Lock the instance; node locks are recalculated later.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the instance's node locks at the node level.

    """
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    inst = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert inst is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    node_names = list(inst.all_nodes)
    for node_name in node_names:
      _CheckNodeOnline(self, node_name)

    self.instance = inst

    if inst.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = inst.FindDisk(self.op.disk)

    # verify that every involved node has enough free space in the VG
    nodeinfo = self.rpc.call_node_info(node_names, self.cfg.GetVGName(),
                                       inst.hypervisor)
    for node_name in node_names:
      node_result = nodeinfo[node_name]
      node_result.Raise("Cannot get current information from node %s" %
                        node_name)
      free_mib = node_result.payload.get('vg_free', None)
      if not isinstance(free_mib, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node_name)
      if self.op.amount > free_mib:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node_name, free_mib, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    inst = self.instance
    grown_disk = self.disk
    amount = self.op.amount
    # grow the device on every node holding it, then record the new size
    for node_name in inst.all_nodes:
      self.cfg.SetDiskID(grown_disk, node_name)
      grow_result = self.rpc.call_blockdev_grow(node_name, grown_disk, amount)
      grow_result.Raise("Grow request failed to node %s" % node_name)
    grown_disk.RecordGrow(amount)
    self.cfg.Update(inst, feedback_fn)
    if self.op.wait_for_sync:
      if not _WaitForSync(self, inst):
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
7031 8729e0d7 Iustin Pop
7032 8729e0d7 Iustin Pop
7033 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  For each requested instance this returns configuration data plus,
  unless the "static" flag is set, live state queried from the
  instance's primary node.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    # this is a read-only query, so all lock levels are taken in shared
    # mode
    self.needed_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      # explicit list given: expand each name and lock only those
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # no names given: query all instances
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # node locks are derived from the instance locks acquired above
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" case: the acquired instance locks are the
      # authoritative name list
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeBlockdevStatus(self, node, instance_name, dev):
    """Returns the status of a block device

    @return: None when static data was requested, the node is unset or
        offline, or the device was not found on the node; otherwise the
        tuple (dev_path, major, minor, sync_percent, estimated_time,
        is_degraded, ldisk_status)

    """
    if self.op.static or not node:
      return None

    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance_name)

    status = result.payload
    if status is None:
      # device not found on the node
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Recurses into the device's children; the result is a dict holding
    the device's configuration plus the block device status on the
    primary node (pstatus) and on C{snode} (sstatus).

    """
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance.name, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        # query the live state from the instance's primary node
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

      result[instance.name] = idict

    return result
7189 a8083063 Iustin Pop
7190 a8083063 Iustin Pop
7191 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  # only the instance name is mandatory; the nics/disks/beparams/hvparams
  # fields are optional and are defaulted in CheckArguments
  _OP_REQP = ["instance_name"]
  REQ_BGL = False
7199 1a5c7281 Guido Trotter
7200 24991749 Iustin Pop
  def CheckArguments(self):
    """Check and normalize the opcode arguments.

    Fills in defaults for missing optional opcode fields and performs a
    purely syntactic validation of the requested disk and NIC changes;
    cluster state is not consulted here (that happens in CheckPrereq).

    """
    # make all optional opcode fields always present
    if not hasattr(self.op, 'nics'):
      self.op.nics = []
    if not hasattr(self.op, 'disks'):
      self.op.disks = []
    if not hasattr(self.op, 'beparams'):
      self.op.beparams = {}
    if not hasattr(self.op, 'hvparams'):
      self.op.hvparams = {}
    self.op.force = getattr(self.op, "force", False)
    if not (self.op.nics or self.op.disks or
            self.op.hvparams or self.op.beparams):
      raise errors.OpPrereqError("No changes submitted")

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        # neither add nor remove: must be an integer index of an
        # existing disk, together with a dict of changes
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index")
        if not isinstance(disk_dict, dict):
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
          raise errors.OpPrereqError(msg)

      if disk_op == constants.DDM_ADD:
        # a new disk needs a valid access mode and an integer size
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing")
        try:
          size = int(size)
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err))
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk")

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time")

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index")
        if not isinstance(nic_dict, dict):
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
          raise errors.OpPrereqError(msg)

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        # the special value constants.VALUE_NONE clears the IP
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict['ip'] = None
        else:
          if not utils.IsValidIP(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)

      # 'bridge' and 'link' are mutually exclusive ways to name the link
      nic_bridge = nic_dict.get('bridge', None)
      nic_link = nic_dict.get('link', None)
      if nic_bridge and nic_link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time")
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
        nic_dict['bridge'] = None
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
        nic_dict['link'] = None

      if nic_op == constants.DDM_ADD:
        # a new nic without an explicit MAC gets an auto-generated one
        nic_mac = nic_dict.get('mac', None)
        if nic_mac is None:
          nic_dict['mac'] = constants.VALUE_AUTO

      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          if not utils.IsValidMac(nic_mac):
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic")

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time")
7303 24991749 Iustin Pop
7304 1a5c7281 Guido Trotter
  def ExpandNames(self):
    """Expand the instance name and prepare the node lock declaration."""
    self._ExpandAndLockInstance()
    level = locking.LEVEL_NODE
    self.needed_locks[level] = []
    self.recalculate_locks[level] = constants.LOCKS_REPLACE
7308 74409b12 Iustin Pop
7309 74409b12 Iustin Pop
  def DeclareLocks(self, level):
    """Lock the instance's nodes once the node locking level is reached."""
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()
7312 a8083063 Iustin Pop
7313 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    # override only the settings that are actually being changed
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      # rebuild the full nic list with the requested overrides applied,
      # in the (ip, mac, mode, link) tuple format
      args['nics'] = []
      nic_override = dict(self.op.nics)
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
      for idx, nic in enumerate(self.instance.nics):
        if idx in nic_override:
          this_nic_override = nic_override[idx]
        else:
          this_nic_override = {}
        if 'ip' in this_nic_override:
          ip = this_nic_override['ip']
        else:
          ip = nic.ip
        if 'mac' in this_nic_override:
          mac = this_nic_override['mac']
        else:
          mac = nic.mac
        if idx in self.nic_pnew:
          # nicparams for this nic were recomputed in CheckPrereq
          nicparams = self.nic_pnew[idx]
        else:
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      if constants.DDM_ADD in nic_override:
        # append the nic that is being added
        ip = nic_override[constants.DDM_ADD].get('ip', None)
        mac = nic_override[constants.DDM_ADD]['mac']
        nicparams = self.nic_pnew[constants.DDM_ADD]
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      elif constants.DDM_REMOVE in nic_override:
        # a remove always drops the last nic
        del args['nics'][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl
7363 a8083063 Iustin Pop
7364 0329617a Guido Trotter
  def _GetUpdatedParams(self, old_params, update_dict,
                        default_values, parameter_types):
    """Compute the new and the filled parameter dicts.

    Applies the requested updates on top of the old parameters: a value
    of constants.VALUE_DEFAULT removes the per-instance override, any
    other value replaces it.  The result is type-checked and then also
    filled with the given defaults.

    @type old_params: dict
    @param old_params: old parameters
    @type update_dict: dict
    @param update_dict: dict containing new parameter values, or
        constants.VALUE_DEFAULT to reset the parameter to its default
        value
    @type default_values: dict
    @param default_values: default values for the filled parameters
    @type parameter_types: dict
    @param parameter_types: dict mapping target dict keys to types
        in constants.ENFORCEABLE_TYPES
    @rtype: (dict, dict)
    @return: (new_parameters, filled_parameters)

    """
    new_params = copy.deepcopy(old_params)
    for name, value in update_dict.iteritems():
      if value == constants.VALUE_DEFAULT:
        # a reset request: drop the per-instance override, if present
        new_params.pop(name, None)
      else:
        new_params[name] = value
    utils.ForceDictType(new_params, parameter_types)
    filled_params = objects.FillDict(default_values, new_params)
    return (new_params, filled_params)
7395 0329617a Guido Trotter
7396 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested hv/be/nic/disk changes against the current
    instance and cluster state, contacting the remote nodes where
    needed (free memory, bridge existence, running state).

    """
    self.force = self.op.force

    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # hvparams processing
    if self.op.hvparams:
      i_hvdict, hv_new = self._GetUpdatedParams(
                             instance.hvparams, self.op.hvparams,
                             cluster.hvparams[instance.hypervisor],
                             constants.HVS_PARAMETER_TYPES)
      # local check
      hypervisor.GetHypervisor(
        instance.hypervisor).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict, be_new = self._GetUpdatedParams(
                             instance.beparams, self.op.beparams,
                             cluster.beparams[constants.PP_DEFAULT],
                             constants.BES_PARAMETER_TYPES)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}

    self.warn = []

    if constants.BE_MEMORY in self.op.beparams and not self.force:
      # verify the instance still fits in memory on its primary (and,
      # with auto_balance, on its secondaries); failure to query a node
      # only produces a warning, an actual memory shortage is fatal
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                         instance.hypervisor)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
        self.warn.append("Node data from primary node %s doesn't contain"
                         " free memory information" % pnode)
      elif instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      else:
        if instance_info.payload:
          current_mem = int(instance_info.payload['memory'])
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    pninfo.payload['memory_free'])
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          msg = nres.fail_msg
          if msg:
            self.warn.append("Can't get info from secondary node %s: %s" %
                             (node, msg))
          elif not isinstance(nres.payload.get('memory_free', None), int):
            self.warn.append("Secondary node %s didn't return free"
                             " memory information" % node)
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
            self.warn.append("Not enough memory to failover instance to"
                             " secondary node %s" % node)

    # NIC processing
    self.nic_pnew = {}
    self.nic_pinst = {}
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics)))
        old_nic_params = instance.nics[nic_op].nicparams
        old_nic_ip = instance.nics[nic_op].ip
      else:
        old_nic_params = {}
        old_nic_ip = None

      update_params_dict = dict([(key, nic_dict[key])
                                 for key in constants.NICS_PARAMETERS
                                 if key in nic_dict])

      # 'bridge' is a legacy way of specifying the link parameter
      if 'bridge' in nic_dict:
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']

      new_nic_params, new_filled_nic_params = \
          self._GetUpdatedParams(old_nic_params, update_params_dict,
                                 cluster.nicparams[constants.PP_DEFAULT],
                                 constants.NICS_PARAMETER_TYPES)
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
      self.nic_pinst[nic_op] = new_nic_params
      self.nic_pnew[nic_op] = new_filled_nic_params
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]

      if new_nic_mode == constants.NIC_MODE_BRIDGED:
        # the target bridge must exist on the primary node
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
        if msg:
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
          if self.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg)
      if new_nic_mode == constants.NIC_MODE_ROUTED:
        # a routed nic must always end up with an IP
        if 'ip' in nic_dict:
          nic_ip = nic_dict['ip']
        else:
          nic_ip = old_nic_ip
        if nic_ip is None:
          raise errors.OpPrereqError('Cannot set the nic ip to None'
                                     ' on a routed nic')
      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac is None:
          raise errors.OpPrereqError('Cannot set the nic mac to None')
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          # otherwise generate the mac
          nic_dict['mac'] = self.cfg.GenerateMAC()
        else:
          # or validate/reserve the current one
          if self.cfg.IsMacInUse(nic_mac):
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % nic_mac)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances")
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance")
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
        ins_l = ins_l[pnode]
        msg = ins_l.fail_msg
        if msg:
          raise errors.OpPrereqError("Can't contact node %s: %s" %
                                     (pnode, msg))
        if instance.name in ins_l.payload:
          raise errors.OpPrereqError("Instance is running, can't remove"
                                     " disks.")

      # FIX: the limit on the number of disks must be checked against the
      # disk count, not the nic count (the old code used instance.nics)
      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks)))

    return
7589 a8083063 Iustin Pop
7590 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    @param feedback_fn: callback used to report warnings and progress

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    # list of (parameter name, new value) pairs, returned as job result
    result = []
    instance = self.instance
    cluster = self.cluster
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
          if msg:
            # best-effort removal: the config change proceeds anyway
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          # new file-based disks live in the same directory as disk 0
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            # creation failures are reported but do not abort the change
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now
        mac = nic_dict['mac']
        ip = nic_dict.get('ip', None)
        # nic_pinst was filled in by CheckPrereq
        nicparams = self.nic_pinst[constants.DDM_ADD]
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
                       (new_nic.mac, new_nic.ip,
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
                       )))
      else:
        # modify an existing nic in place
        for key in 'mac', 'ip':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
        if nic_op in self.nic_pnew:
          instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
        for key, val in nic_dict.iteritems():
          result.append(("nic.%s/%d" % (key, nic_op), val))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # persist the modified instance object
    self.cfg.Update(instance, feedback_fn)

    return result
7698 a8083063 Iustin Pop
7699 a8083063 Iustin Pop
7700 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    # node locks are shared: we only read the export lists; an empty
    # node list means "query every node in the cluster"
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the set of nodes to query is exactly the set of acquired node locks
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    rpc_results = self.rpc.call_export_list(self.nodes)
    exports = {}
    for node_name in rpc_results:
      node_result = rpc_results[node_name]
      # a node whose RPC failed is marked with False instead of a list
      if node_result.fail_msg:
        exports[node_name] = False
      else:
        exports[node_name] = node_result.payload

    return exports
7740 a8083063 Iustin Pop
7741 a8083063 Iustin Pop
7742 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments.

    """
    # an explicit shutdown timeout is optional; fall back to the default
    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only the instance primary and the destination node
    #
    # For now all nodes must be locked, since we don't know where the
    # previous export might live, and this LU searches for it and removes
    # it from its current node. This could later be fixed by:
    #  - making a tasklet to search (share-lock all), then create the new
    #    one, then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # a bad node name, rather than a node we failed to lock
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification: file-based disks cannot be exported
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node

    if self.op.shutdown:
      # stop the instance itself, but leave its disks usable for snapshots
      feedback_fn("Shutting down instance %s" % instance.name)
      rpc_result = self.rpc.call_instance_shutdown(src_node, instance,
                                                   self.shutdown_timeout)
      rpc_result.Raise("Could not shutdown instance %s on"
                       " node %s" % (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disk IDs correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    activate_disks = (not instance.admin_up)

    if activate_disks:
      # a stopped instance has no running disks; bring them up for the export
      feedback_fn("Activating disks for %s" % instance.name)
      _StartInstanceDisks(self, instance, None)

    try:
      # per-disk export results
      dresults = []
      try:
        for idx, disk in enumerate(instance.disks):
          feedback_fn("Creating a snapshot of disk/%s on node %s" %
                      (idx, src_node))

          # on success, the payload is a snapshot of an lvm leaf of the
          # disk we passed in
          snap_result = self.rpc.call_blockdev_snapshot(src_node, disk)
          snap_msg = snap_result.fail_msg
          if snap_msg:
            self.LogWarning("Could not snapshot disk/%s on node %s: %s",
                            idx, src_node, snap_msg)
            snap_disks.append(False)
          else:
            disk_id = (vgname, snap_result.payload)
            snap_disks.append(objects.Disk(dev_type=constants.LD_LV,
                                           size=disk.size,
                                           logical_id=disk_id,
                                           physical_id=disk_id,
                                           iv_name=disk.iv_name))

      finally:
        # restart the instance if we were the ones who stopped it above
        if self.op.shutdown and instance.admin_up:
          feedback_fn("Starting instance %s" % instance.name)
          start_result = self.rpc.call_instance_start(src_node, instance,
                                                      None, None)
          start_msg = start_result.fail_msg
          if start_msg:
            _ShutdownInstanceDisks(self, instance)
            raise errors.OpExecError("Could not start instance: %s" %
                                     start_msg)

      # TODO: check for size

      cluster_name = self.cfg.GetClusterName()
      for idx, dev in enumerate(snap_disks):
        feedback_fn("Exporting snapshot %s from %s to %s" %
                    (idx, src_node, dst_node.name))
        if not dev:
          # snapshotting this disk failed earlier, nothing to export
          dresults.append(False)
          continue
        exp_result = self.rpc.call_snapshot_export(src_node, dev,
                                                   dst_node.name, instance,
                                                   cluster_name, idx)
        exp_msg = exp_result.fail_msg
        if exp_msg:
          self.LogWarning("Could not export disk/%s from node %s to"
                          " node %s: %s", idx, src_node, dst_node.name,
                          exp_msg)
          dresults.append(False)
        else:
          dresults.append(True)
        # the snapshot is no longer needed once it has been exported
        rm_msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
        if rm_msg:
          self.LogWarning("Could not remove snapshot for disk/%d from node"
                          " %s: %s", idx, src_node, rm_msg)

      feedback_fn("Finalizing export on %s" % dst_node.name)
      fin_result = self.rpc.call_finalize_export(dst_node.name, instance,
                                                 snap_disks)
      fin_resu = True
      fin_msg = fin_result.fail_msg
      if fin_msg:
        self.LogWarning("Could not finalize export for instance %s"
                        " on node %s: %s", instance.name, dst_node.name,
                        fin_msg)
        fin_resu = False

    finally:
      if activate_disks:
        feedback_fn("Deactivating disks for %s" % instance.name)
        _ShutdownInstanceDisks(self, instance)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal; if we
    # proceeded, the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list
    iname = instance.name
    if nodelist:
      feedback_fn("Removing old exports for instance %s" % iname)
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].fail_msg:
          continue
        if iname in exportlist[node].payload:
          rm_msg = self.rpc.call_export_remove(node, iname).fail_msg
          if rm_msg:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s: %s", iname, node, rm_msg)
    return fin_resu, dresults
7938 5c947f38 Iustin Pop
7939 5c947f38 Iustin Pop
7940 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # We need all nodes locked for RemoveExport (the export may live on any
    # of them), but not the instance itself: nothing happens to it, and
    # this also allows removing exports of an already-removed instance.
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed
    # in; this will only work if it was an FQDN, though.
    if instance_name:
      fqdn_warn = False
    else:
      fqdn_warn = True
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      list_msg = exportlist[node].fail_msg
      if list_msg:
        self.LogWarning("Failed to query node %s (continuing): %s",
                        node, list_msg)
        continue
      if instance_name in exportlist[node].payload:
        found = True
        rm_result = self.rpc.call_export_remove(node, instance_name)
        rm_msg = rm_result.fail_msg
        if rm_msg:
          logging.error("Could not remove export for instance %s"
                        " on node %s: %s", instance_name, node, rm_msg)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
7991 9ac99fda Guido Trotter
7992 9ac99fda Guido Trotter
7993 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    kind = self.op.kind
    if kind == constants.TAG_NODE:
      # expand the node name and take a lock on that node only
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif kind == constants.TAG_INSTANCE:
      # expand the instance name and take a lock on that instance only
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # resolve the taggable object the operation refers to
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
8030 5c947f38 Iustin Pop
8031 5c947f38 Iustin Pop
8032 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # materialize the tag collection into a plain list for the caller
    tags = self.target.GetTags()
    return list(tags)
8044 5c947f38 Iustin Pop
8045 5c947f38 Iustin Pop
8046 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
8047 73415719 Iustin Pop
  """Searches the tags for a given pattern.
8048 73415719 Iustin Pop

8049 73415719 Iustin Pop
  """
8050 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
8051 8646adce Guido Trotter
  REQ_BGL = False
8052 8646adce Guido Trotter
8053 8646adce Guido Trotter
  def ExpandNames(self):
8054 8646adce Guido Trotter
    self.needed_locks = {}
8055 73415719 Iustin Pop
8056 73415719 Iustin Pop
  def CheckPrereq(self):
8057 73415719 Iustin Pop
    """Check prerequisites.
8058 73415719 Iustin Pop

8059 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
8060 73415719 Iustin Pop

8061 73415719 Iustin Pop
    """
8062 73415719 Iustin Pop
    try:
8063 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
8064 73415719 Iustin Pop
    except re.error, err:
8065 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
8066 73415719 Iustin Pop
                                 (self.op.pattern, err))
8067 73415719 Iustin Pop
8068 73415719 Iustin Pop
  def Exec(self, feedback_fn):
8069 73415719 Iustin Pop
    """Returns the tag list.
8070 73415719 Iustin Pop

8071 73415719 Iustin Pop
    """
8072 73415719 Iustin Pop
    cfg = self.cfg
8073 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
8074 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
8075 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
8076 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
8077 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
8078 73415719 Iustin Pop
    results = []
8079 73415719 Iustin Pop
    for path, target in tgts:
8080 73415719 Iustin Pop
      for tag in target.GetTags():
8081 73415719 Iustin Pop
        if self.re.search(tag):
8082 73415719 Iustin Pop
          results.append((path, tag))
8083 73415719 Iustin Pop
    return results
8084 73415719 Iustin Pop
8085 73415719 Iustin Pop
8086 f27302fa Iustin Pop
class LUAddTags(TagsLU):
8087 5c947f38 Iustin Pop
  """Sets a tag on a given object.
8088 5c947f38 Iustin Pop

8089 5c947f38 Iustin Pop
  """
8090 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
8091 8646adce Guido Trotter
  REQ_BGL = False
8092 5c947f38 Iustin Pop
8093 5c947f38 Iustin Pop
  def CheckPrereq(self):
8094 5c947f38 Iustin Pop
    """Check prerequisites.
8095 5c947f38 Iustin Pop

8096 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
8097 5c947f38 Iustin Pop

8098 5c947f38 Iustin Pop
    """
8099 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
8100 f27302fa Iustin Pop
    for tag in self.op.tags:
8101 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
8102 5c947f38 Iustin Pop
8103 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
8104 5c947f38 Iustin Pop
    """Sets the tag.
8105 5c947f38 Iustin Pop

8106 5c947f38 Iustin Pop
    """
8107 5c947f38 Iustin Pop
    try:
8108 f27302fa Iustin Pop
      for tag in self.op.tags:
8109 f27302fa Iustin Pop
        self.target.AddTag(tag)
8110 5c947f38 Iustin Pop
    except errors.TagError, err:
8111 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
8112 159d4ec6 Iustin Pop
    self.cfg.Update(self.target, feedback_fn)
8113 5c947f38 Iustin Pop
8114 5c947f38 Iustin Pop
8115 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    # every tag to delete must currently be present on the target
    missing = del_tags - cur_tags
    if missing:
      diff_names = sorted(["'%s'" % tag for tag in missing])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    # persist the modified target object back to the configuration
    self.cfg.Update(self.target, feedback_fn)
8147 06009e27 Iustin Pop
8148 0eed6e61 Guido Trotter
8149 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to
      # use this way in ExpandNames; check LogicalUnit.ExpandNames docstring
      # for more information
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      node_results = self.rpc.call_test_delay(self.op.on_nodes,
                                              self.op.duration)
      for node, node_result in node_results.items():
        node_result.Raise("Failure during rpc call to node %s" % node)
8189 d61df03e Iustin Pop
8190 d61df03e Iustin Pop
8191 d1c2dd75 Iustin Pop
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # keyword arguments required when mode is IALLOCATOR_MODE_ALLOC
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  # keyword arguments required when mode is IALLOCATOR_MODE_RELOC
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, cfg, rpc, mode, name, **kwargs):
    """Initialize the allocator request and build its input data.

    @param cfg: cluster configuration accessor, used for all queries
    @param rpc: RPC runner, used to query node/instance state
    @param mode: one of the constants.IALLOCATOR_MODE_* values; selects
        which keyword arguments are mandatory
    @param name: instance name this request is about
    @param kwargs: exactly the keys listed in _ALLO_KEYS or _RELO_KEYS
        (depending on mode); anything extra or missing raises
        ProgrammerError

    """
    self.cfg = cfg
    self.rpc = rpc
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    # select the mandatory keyword set for this mode
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # the kwargs must match the keyset exactly: no extras, no missing keys
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    # eagerly compute the input text/data for the external script
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    Queries the configuration and, via RPC, all nodes, then stores the
    assembled structure (cluster, per-node and per-instance data) in
    self.in_data.

    """
    cfg = self.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    # pair each instance with its filled-in backend parameters
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    # pick the hypervisor whose view of node resources we want: the
    # requested one for allocations, the instance's own for relocations
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
                                        hypervisor_name)
    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      # dynamic (RPC-derived) values are only filled in for nodes that
      # are reachable; offline/drained nodes keep just the static data
      if not (ninfo.offline or ninfo.drained):
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload

        # sanity-check the RPC payload before using it
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            # reserve the memory an instance is configured for but not
            # currently using, so free_memory reflects a worst case
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        # fill each NIC's parameters from the cluster-level defaults
        filled_params = objects.FillDict(
            cluster_info.nicparams[constants.PP_DEFAULT],
            nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        # for bridged NICs also expose the link under the legacy
        # "bridge" key
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    # mirrored templates need a secondary node as well
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    @raise errors.ProgrammerError: if the named instance does not exist
    @raise errors.OpPrereqError: if the instance is not net-mirrored or
        does not have exactly one secondary node

    """
    instance = self.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    # relocation only ever picks one (secondary) node
    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    Computes the cluster-wide data, adds the mode-specific request and
    serializes the result into self.in_text.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    @param name: name of the allocator script to run
    @param validate: whether to parse and validate the script's output
        via _ValidateResult (when False, only self.out_text is set)
    @param call_fn: alternative callable to invoke the script; defaults
        to the iallocator RPC runner on the master node

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    @raise errors.OpExecError: if the output is not parseable, is not a
        dict, misses one of the mandatory keys (success, info, nodes),
        or its "nodes" value is not a list

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # the mandatory result keys become attributes of this instance
    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
8492 538475ca Iustin Pop
8493 538475ca Iustin Pop
8494 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation mode: all instance-description attributes must be
      # present and well-formed, and the instance must not exist yet
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      # each NIC must be a dict carrying mac, ip and bridge
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      # each disk must be a dict with an integer size and an 'r'/'w' mode
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      # default the hypervisor to the cluster-wide one if unset
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # relocation mode: the instance must exist; remember its secondary
      # nodes as the relocation source
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    # only the "out" direction needs an actual allocator script name
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    Builds an IAllocator request matching the opcode mode and either
    returns the generated input text (direction "in") or runs the named
    allocator script without validation and returns its raw output
    (direction "out").

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self.cfg, self.rpc,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      # validate=False: the test opcode returns the raw script output
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result