Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 12649e35

History | View | Annotate | Download (234.3 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 ffa1c0dc Iustin Pop
import logging
34 74409b12 Iustin Pop
import copy
35 4b7735f9 Iustin Pop
import random
36 a8083063 Iustin Pop
37 a8083063 Iustin Pop
from ganeti import ssh
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 6048c986 Guido Trotter
from ganeti import locking
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 8d14b30d Iustin Pop
from ganeti import serializer
46 112f18a5 Iustin Pop
from ganeti import ssconf
47 d61df03e Iustin Pop
48 d61df03e Iustin Pop
49 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None   # hooks path; when None, BuildHooksEnv is never called
  HTYPE = None   # hooks type; redefined together with HPATH by hook-running LUs
  _OP_REQP = []  # opcode attributes whose presence __init__ enforces
  REQ_BGL = True # whether this LU must hold the Big Ganeti Lock exclusively
70 a8083063 Iustin Pop
    """Constructor for LogicalUnit.
71 a8083063 Iustin Pop

72 a8083063 Iustin Pop
    This needs to be overriden in derived classes in order to check op
73 a8083063 Iustin Pop
    validity.
74 a8083063 Iustin Pop

75 a8083063 Iustin Pop
    """
76 5bfac263 Iustin Pop
    self.proc = processor
77 a8083063 Iustin Pop
    self.op = op
78 77b657a3 Guido Trotter
    self.cfg = context.cfg
79 77b657a3 Guido Trotter
    self.context = context
80 72737a7f Iustin Pop
    self.rpc = rpc
81 ca2a79e1 Guido Trotter
    # Dicts used to declare locking needs to mcpu
82 d465bdc8 Guido Trotter
    self.needed_locks = None
83 6683bba2 Guido Trotter
    self.acquired_locks = {}
84 3977a4c1 Guido Trotter
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
85 ca2a79e1 Guido Trotter
    self.add_locks = {}
86 ca2a79e1 Guido Trotter
    self.remove_locks = {}
87 c4a2fee1 Guido Trotter
    # Used to force good behavior when calling helper functions
88 c4a2fee1 Guido Trotter
    self.recalculate_locks = {}
89 c92b310a Michael Hanselmann
    self.__ssh = None
90 86d9d3bb Iustin Pop
    # logging
91 86d9d3bb Iustin Pop
    self.LogWarning = processor.LogWarning
92 86d9d3bb Iustin Pop
    self.LogInfo = processor.LogInfo
93 c92b310a Michael Hanselmann
94 a8083063 Iustin Pop
    for attr_name in self._OP_REQP:
95 a8083063 Iustin Pop
      attr_val = getattr(op, attr_name, None)
96 a8083063 Iustin Pop
      if attr_val is None:
97 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Required parameter '%s' missing" %
98 3ecf6786 Iustin Pop
                                   attr_name)
99 4be4691d Iustin Pop
    self.CheckArguments()
100 a8083063 Iustin Pop
101 c92b310a Michael Hanselmann
  def __GetSSH(self):
102 c92b310a Michael Hanselmann
    """Returns the SshRunner object
103 c92b310a Michael Hanselmann

104 c92b310a Michael Hanselmann
    """
105 c92b310a Michael Hanselmann
    if not self.__ssh:
106 6b0469d2 Iustin Pop
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
107 c92b310a Michael Hanselmann
    return self.__ssh
108 c92b310a Michael Hanselmann
109 c92b310a Michael Hanselmann
  ssh = property(fget=__GetSSH)
110 c92b310a Michael Hanselmann
111 4be4691d Iustin Pop
  def CheckArguments(self):
112 4be4691d Iustin Pop
    """Check syntactic validity for the opcode arguments.
113 4be4691d Iustin Pop

114 4be4691d Iustin Pop
    This method is for doing a simple syntactic check and ensure
115 4be4691d Iustin Pop
    validity of opcode parameters, without any cluster-related
116 4be4691d Iustin Pop
    checks. While the same can be accomplished in ExpandNames and/or
117 4be4691d Iustin Pop
    CheckPrereq, doing these separate is better because:
118 4be4691d Iustin Pop

119 4be4691d Iustin Pop
      - ExpandNames is left as as purely a lock-related function
120 4be4691d Iustin Pop
      - CheckPrereq is run after we have aquired locks (and possible
121 4be4691d Iustin Pop
        waited for them)
122 4be4691d Iustin Pop

123 4be4691d Iustin Pop
    The function is allowed to change the self.op attribute so that
124 4be4691d Iustin Pop
    later methods can no longer worry about missing parameters.
125 4be4691d Iustin Pop

126 4be4691d Iustin Pop
    """
127 4be4691d Iustin Pop
    pass
128 4be4691d Iustin Pop
129 d465bdc8 Guido Trotter
  def ExpandNames(self):
130 d465bdc8 Guido Trotter
    """Expand names for this LU.
131 d465bdc8 Guido Trotter

132 d465bdc8 Guido Trotter
    This method is called before starting to execute the opcode, and it should
133 d465bdc8 Guido Trotter
    update all the parameters of the opcode to their canonical form (e.g. a
134 d465bdc8 Guido Trotter
    short node name must be fully expanded after this method has successfully
135 d465bdc8 Guido Trotter
    completed). This way locking, hooks, logging, ecc. can work correctly.
136 d465bdc8 Guido Trotter

137 d465bdc8 Guido Trotter
    LUs which implement this method must also populate the self.needed_locks
138 d465bdc8 Guido Trotter
    member, as a dict with lock levels as keys, and a list of needed lock names
139 d465bdc8 Guido Trotter
    as values. Rules:
140 e4376078 Iustin Pop

141 e4376078 Iustin Pop
      - use an empty dict if you don't need any lock
142 e4376078 Iustin Pop
      - if you don't need any lock at a particular level omit that level
143 e4376078 Iustin Pop
      - don't put anything for the BGL level
144 e4376078 Iustin Pop
      - if you want all locks at a level use locking.ALL_SET as a value
145 d465bdc8 Guido Trotter

146 3977a4c1 Guido Trotter
    If you need to share locks (rather than acquire them exclusively) at one
147 3977a4c1 Guido Trotter
    level you can modify self.share_locks, setting a true value (usually 1) for
148 3977a4c1 Guido Trotter
    that level. By default locks are not shared.
149 3977a4c1 Guido Trotter

150 e4376078 Iustin Pop
    Examples::
151 e4376078 Iustin Pop

152 e4376078 Iustin Pop
      # Acquire all nodes and one instance
153 e4376078 Iustin Pop
      self.needed_locks = {
154 e4376078 Iustin Pop
        locking.LEVEL_NODE: locking.ALL_SET,
155 e4376078 Iustin Pop
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
156 e4376078 Iustin Pop
      }
157 e4376078 Iustin Pop
      # Acquire just two nodes
158 e4376078 Iustin Pop
      self.needed_locks = {
159 e4376078 Iustin Pop
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
160 e4376078 Iustin Pop
      }
161 e4376078 Iustin Pop
      # Acquire no locks
162 e4376078 Iustin Pop
      self.needed_locks = {} # No, you can't leave it to the default value None
163 d465bdc8 Guido Trotter

164 d465bdc8 Guido Trotter
    """
165 d465bdc8 Guido Trotter
    # The implementation of this method is mandatory only if the new LU is
166 d465bdc8 Guido Trotter
    # concurrent, so that old LUs don't need to be changed all at the same
167 d465bdc8 Guido Trotter
    # time.
168 d465bdc8 Guido Trotter
    if self.REQ_BGL:
169 d465bdc8 Guido Trotter
      self.needed_locks = {} # Exclusive LUs don't need locks.
170 d465bdc8 Guido Trotter
    else:
171 d465bdc8 Guido Trotter
      raise NotImplementedError
172 d465bdc8 Guido Trotter
173 fb8dcb62 Guido Trotter
  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    A typical implementation calls self._LockInstancesNodes() when level is
    locking.LEVEL_NODE (see that helper's documentation).

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """
  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    # every concrete LU must provide its own implementation
    raise NotImplementedError
  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    @param feedback_fn: function used to send feedback back to the caller

    """
    # every concrete LU must provide its own implementation
    raise NotImplementedError
  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result
  def _ExpandAndLockInstance(self):
    """Expand self.op.instance_name and declare it for locking.

    Many LUs that work on a single instance receive a (possibly short)
    name in self.op.instance_name; this helper canonicalizes that name,
    stores it back into the opcode, and records it under
    locking.LEVEL_INSTANCE in self.needed_locks (initializing
    needed_locks to a dict if that hasn't been done before).

    @raise errors.OpPrereqError: if the instance name cannot be expanded

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if full_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.op.instance_name = full_name
    self.needed_locks[locking.LEVEL_INSTANCE] = full_name
  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # drop the recalculation request, so a repeated call trips the assert above
    del self.recalculate_locks[locking.LEVEL_NODE]
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None  # no hooks path: BuildHooksEnv is never called for these LUs
  HTYPE = None  # likewise, no hooks type
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: non-empty list of node names to expand
  @rtype: list
  @return: the expanded node names, nicely sorted
  @raise errors.OpPrereqError: if the nodes parameter has a wrong type
      or contains an unknown node name
  @raise errors.ProgrammerError: if the nodes parameter is empty

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  def _Expand(name):
    # translate a possibly-short name into its canonical form
    expanded = lu.cfg.ExpandNodeName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    return expanded

  return utils.NiceSort([_Expand(name) for name in nodes])
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names, or an empty list for all
      instances
  @rtype: list
  @return: the expanded instance names; when no names were passed, all
      cluster instances, nicely sorted
  @raise errors.OpPrereqError: if the instances parameter has a wrong
      type or any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # no explicit selection: return the full (sorted) instance list
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for name in instances:
    expanded = lu.cfg.ExpandInstanceName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    wanted.append(expanded)
  return wanted
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @param selected: the field names requested by the caller
  @raise errors.OpPrereqError: if any selected field matches neither set

  """
  all_fields = utils.FieldSet()
  all_fields.Extend(static)
  all_fields.Extend(dynamic)

  unknown = all_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
def _CheckBooleanOpField(op, name):
419 a5961235 Iustin Pop
  """Validates boolean opcode parameters.
420 a5961235 Iustin Pop

421 a5961235 Iustin Pop
  This will ensure that an opcode parameter is either a boolean value,
422 a5961235 Iustin Pop
  or None (but that it always exists).
423 a5961235 Iustin Pop

424 a5961235 Iustin Pop
  """
425 a5961235 Iustin Pop
  val = getattr(op, name, None)
426 a5961235 Iustin Pop
  if not (val is None or isinstance(val, bool)):
427 a5961235 Iustin Pop
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
428 a5961235 Iustin Pop
                               (name, str(val)))
429 a5961235 Iustin Pop
  setattr(op, name, val)
430 a5961235 Iustin Pop
431 a5961235 Iustin Pop
432 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
433 a5961235 Iustin Pop
  """Ensure that a given node is online.
434 a5961235 Iustin Pop

435 a5961235 Iustin Pop
  @param lu: the LU on behalf of which we make the check
436 a5961235 Iustin Pop
  @param node: the node to check
437 a5961235 Iustin Pop
  @raise errors.OpPrereqError: if the nodes is offline
438 a5961235 Iustin Pop

439 a5961235 Iustin Pop
  """
440 a5961235 Iustin Pop
  if lu.cfg.GetNodeInfo(node).offline:
441 a5961235 Iustin Pop
    raise errors.OpPrereqError("Can't use offline node %s" % node)
442 a5961235 Iustin Pop
443 a5961235 Iustin Pop
444 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
445 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
446 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
447 e4376078 Iustin Pop

448 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
449 e4376078 Iustin Pop

450 e4376078 Iustin Pop
  @type name: string
451 e4376078 Iustin Pop
  @param name: the name of the instance
452 e4376078 Iustin Pop
  @type primary_node: string
453 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
454 e4376078 Iustin Pop
  @type secondary_nodes: list
455 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
456 e4376078 Iustin Pop
  @type os_type: string
457 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
458 0d68c45d Iustin Pop
  @type status: boolean
459 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
460 e4376078 Iustin Pop
  @type memory: string
461 e4376078 Iustin Pop
  @param memory: the memory size of the instance
462 e4376078 Iustin Pop
  @type vcpus: string
463 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
464 e4376078 Iustin Pop
  @type nics: list
465 e4376078 Iustin Pop
  @param nics: list of tuples (ip, bridge, mac) representing
466 e4376078 Iustin Pop
      the NICs the instance  has
467 e4376078 Iustin Pop
  @rtype: dict
468 e4376078 Iustin Pop
  @return: the hook environment for this instance
469 ecb215b5 Michael Hanselmann

470 396e1b78 Michael Hanselmann
  """
471 0d68c45d Iustin Pop
  if status:
472 0d68c45d Iustin Pop
    str_status = "up"
473 0d68c45d Iustin Pop
  else:
474 0d68c45d Iustin Pop
    str_status = "down"
475 396e1b78 Michael Hanselmann
  env = {
476 0e137c28 Iustin Pop
    "OP_TARGET": name,
477 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
478 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
479 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
480 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
481 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
482 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
483 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
484 396e1b78 Michael Hanselmann
  }
485 396e1b78 Michael Hanselmann
486 396e1b78 Michael Hanselmann
  if nics:
487 396e1b78 Michael Hanselmann
    nic_count = len(nics)
488 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
489 396e1b78 Michael Hanselmann
      if ip is None:
490 396e1b78 Michael Hanselmann
        ip = ""
491 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
492 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
493 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
494 396e1b78 Michael Hanselmann
  else:
495 396e1b78 Michael Hanselmann
    nic_count = 0
496 396e1b78 Michael Hanselmann
497 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
498 396e1b78 Michael Hanselmann
499 396e1b78 Michael Hanselmann
  return env
500 396e1b78 Michael Hanselmann
501 396e1b78 Michael Hanselmann
502 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  be_params = cluster.FillBE(instance)
  nic_list = [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics]
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': be_params[constants.BE_MEMORY],
    'vcpus': be_params[constants.BE_VCPUS],
    'nics': nic_list,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
def _AdjustCandidatePool(lu):
534 ec0292f1 Iustin Pop
  """Adjust the candidate pool after node operations.
535 ec0292f1 Iustin Pop

536 ec0292f1 Iustin Pop
  """
537 ec0292f1 Iustin Pop
  mod_list = lu.cfg.MaintainCandidatePool()
538 ec0292f1 Iustin Pop
  if mod_list:
539 ec0292f1 Iustin Pop
    lu.LogInfo("Promoted nodes to master candidate role: %s",
540 ee513a66 Iustin Pop
               ", ".join(node.name for node in mod_list))
541 ec0292f1 Iustin Pop
    for name in mod_list:
542 ec0292f1 Iustin Pop
      lu.context.ReaddNode(name)
543 ec0292f1 Iustin Pop
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
544 ec0292f1 Iustin Pop
  if mc_now > mc_max:
545 ec0292f1 Iustin Pop
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
546 ec0292f1 Iustin Pop
               (mc_now, mc_max))
547 ec0292f1 Iustin Pop
548 ec0292f1 Iustin Pop
549 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
550 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
551 bf6929a2 Alexander Schreiber

552 bf6929a2 Alexander Schreiber
  """
553 bf6929a2 Alexander Schreiber
  # check bridges existance
554 bf6929a2 Alexander Schreiber
  brlist = [nic.bridge for nic in instance.nics]
555 781de953 Iustin Pop
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
556 781de953 Iustin Pop
  result.Raise()
557 781de953 Iustin Pop
  if not result.data:
558 781de953 Iustin Pop
    raise errors.OpPrereqError("One or more target bridges %s does not"
559 bf6929a2 Alexander Schreiber
                               " exist on destination node '%s'" %
560 bf6929a2 Alexander Schreiber
                               (brlist, instance.primary_node))
561 bf6929a2 Alexander Schreiber
562 bf6929a2 Alexander Schreiber
563 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit implementing full cluster destruction.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    The cluster must consist of only the master node and must hold no
    instances, otherwise we refuse to destroy it.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodes = self.cfg.GetNodeList()
    if len(nodes) != 1 or nodes[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodes) - 1))
    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    stop_result = self.rpc.call_node_stop_master(master, False)
    stop_result.Raise()
    if not stop_result.data:
      raise errors.OpExecError("Could not disable the master role")
    # keep backup copies of the cluster user's ssh keys before teardown
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_path in (priv_key, pub_key):
      utils.CreateBackup(key_path)
    return master
601 a8083063 Iustin Pop
602 a8083063 Iustin Pop
603 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
604 a8083063 Iustin Pop
  """Verifies the cluster status.
605 a8083063 Iustin Pop

606 a8083063 Iustin Pop
  """
607 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
608 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
609 e54c4c5e Guido Trotter
  _OP_REQP = ["skip_checks"]
610 d4b9d97f Guido Trotter
  REQ_BGL = False
611 d4b9d97f Guido Trotter
612 d4b9d97f Guido Trotter
  def ExpandNames(self):
    """Request (shared) locks on all nodes and instances for the verify run."""
    self.needed_locks = {}
    for level in (locking.LEVEL_NODE, locking.LEVEL_INSTANCE):
      self.needed_locks[level] = locking.ALL_SET
    # all locks are acquired in shared mode
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
618 a8083063 Iustin Pop
619 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status
    @rtype: boolean
    @return: True if the node failed at least one check ("bad"),
        False otherwise

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version; without a usable version answer we cannot
    # trust any of the other results, so we bail out early
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version mismatch is only a warning, not an error
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G

    vglist = node_result.get(constants.NV_VGLIST, None)
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum; what counts as an error depends on
    # whether the node is a master candidate and whether the file is
    # restricted to masters (master_files)

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any
    # NOTE(review): the loops below rebind the local name 'node' (the
    # peer being reported), clobbering the node name set at the top;
    # harmless today since no later message uses 'node', but fragile

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    # per-hypervisor verification results (None means that hypervisor is ok)
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list: every minor that must exist has to be in use,
    # and every used minor must be known in the cluster-wide drbd map
    used_minors = node_result.get(constants.NV_DRBDLIST, [])
    for minor, (iname, must_exist) in drbd_map.items():
      if minor not in used_minors and must_exist:
        feedback_fn("  - ERROR: drbd minor %d of instance %s is not active" %
                    (minor, iname))
        bad = True
    for minor in used_minors:
      if minor not in drbd_map:
        feedback_fn("  - ERROR: unallocated drbd minor %d is in use" % minor)
        bad = True

    return bad
759 a8083063 Iustin Pop
760 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    primary = instanceconfig.primary_node

    # expected logical volumes, grouped per node
    expected_vols = {}
    instanceconfig.MapLVsByNode(expected_vols)

    for node, volumes in expected_vols.items():
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      present = node_vol_is.get(node, {})
      for volume in volumes:
        if volume not in present:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.admin_up:
      running_on_primary = (primary in node_instance and
                            instance in node_instance[primary])
      if not running_on_primary and primary not in n_offline:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, primary))
        bad = True

    # the instance must not be reported as running on any other node
    for node in node_instance:
      if node != primary and instance in node_instance[node]:
        feedback_fn("  - ERROR: instance %s should not run on node %s" %
                        (instance, node))
        bad = True

    return bad
801 a8083063 Iustin Pop
802 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node, volumes in node_vol_is.items():
      expected = node_vol_should.get(node, {})
      for volume in volumes:
        if volume not in expected:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad
818 a8083063 Iustin Pop
819 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    known = frozenset(instancelist)
    for node, running in node_instance.items():
      for inst_name in running:
        if inst_name not in known:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (inst_name, node))
          bad = True
    return bad
833 a8083063 Iustin Pop
834 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    @param node_info: dict of per-node state, as built by Exec; each value
        must provide 'mfree' and 'sinst-by-pnode' entries
    @param instance_cfg: dict mapping instance name to its config object
    @param feedback_fn: function used to accumulate results
    @rtype: boolean
    @return: True if at least one node is not N+1 compliant

    """
    bad = False
    # the cluster info is loop-invariant; fetch it once instead of once
    # per instance in the innermost loop
    cluster = self.cfg.GetClusterInfo()

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = cluster.FillBE(instance_cfg[instance])
          # only auto-balanced instances count towards the N+1 requirement
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
863 2b3b6ddd Guido Trotter
864 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    unknown_checks = self.skip_set - constants.VERIFY_OPTIONAL_CHECKS
    if unknown_checks:
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
874 a8083063 Iustin Pop
875 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], self.cfg.GetNodeList()
886 d8fff41c Guido Trotter
887 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
888 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
889 a8083063 Iustin Pop

890 a8083063 Iustin Pop
    """
891 a8083063 Iustin Pop
    bad = False
892 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
893 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
894 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
895 a8083063 Iustin Pop
896 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
897 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
898 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
899 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
900 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
901 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
902 6d2e83d5 Iustin Pop
                        for iname in instancelist)
903 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
904 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
905 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
906 a8083063 Iustin Pop
    node_volume = {}
907 a8083063 Iustin Pop
    node_instance = {}
908 9c9c7d30 Guido Trotter
    node_info = {}
909 26b6af5e Guido Trotter
    instance_cfg = {}
910 a8083063 Iustin Pop
911 a8083063 Iustin Pop
    # FIXME: verify OS list
912 a8083063 Iustin Pop
    # do local checksums
913 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
914 112f18a5 Iustin Pop
915 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
916 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
917 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
918 112f18a5 Iustin Pop
    file_names.extend(master_files)
919 112f18a5 Iustin Pop
920 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
921 a8083063 Iustin Pop
922 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
923 a8083063 Iustin Pop
    node_verify_param = {
924 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
925 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
926 82e37788 Iustin Pop
                              if not node.offline],
927 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
928 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
929 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
930 82e37788 Iustin Pop
                                 if not node.offline],
931 25361b9a Iustin Pop
      constants.NV_LVLIST: vg_name,
932 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
933 25361b9a Iustin Pop
      constants.NV_VGLIST: None,
934 25361b9a Iustin Pop
      constants.NV_VERSION: None,
935 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
936 6d2e83d5 Iustin Pop
      constants.NV_DRBDLIST: None,
937 a8083063 Iustin Pop
      }
938 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
939 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
940 a8083063 Iustin Pop
941 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
942 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
943 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
944 6d2e83d5 Iustin Pop
945 112f18a5 Iustin Pop
    for node_i in nodeinfo:
946 112f18a5 Iustin Pop
      node = node_i.name
947 25361b9a Iustin Pop
      nresult = all_nvinfo[node].data
948 25361b9a Iustin Pop
949 0a66c968 Iustin Pop
      if node_i.offline:
950 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
951 0a66c968 Iustin Pop
        n_offline.append(node)
952 0a66c968 Iustin Pop
        continue
953 0a66c968 Iustin Pop
954 112f18a5 Iustin Pop
      if node == master_node:
955 25361b9a Iustin Pop
        ntype = "master"
956 112f18a5 Iustin Pop
      elif node_i.master_candidate:
957 25361b9a Iustin Pop
        ntype = "master candidate"
958 112f18a5 Iustin Pop
      else:
959 25361b9a Iustin Pop
        ntype = "regular"
960 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
961 25361b9a Iustin Pop
962 25361b9a Iustin Pop
      if all_nvinfo[node].failed or not isinstance(nresult, dict):
963 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
964 25361b9a Iustin Pop
        bad = True
965 25361b9a Iustin Pop
        continue
966 25361b9a Iustin Pop
967 6d2e83d5 Iustin Pop
      node_drbd = {}
968 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
969 6d2e83d5 Iustin Pop
        instance = instanceinfo[instance]
970 0d68c45d Iustin Pop
        node_drbd[minor] = (instance.name, instance.admin_up)
971 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
972 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
973 6d2e83d5 Iustin Pop
                                node_drbd)
974 a8083063 Iustin Pop
      bad = bad or result
975 a8083063 Iustin Pop
976 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
977 25361b9a Iustin Pop
      if isinstance(lvdata, basestring):
978 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
979 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
980 b63ed789 Iustin Pop
        bad = True
981 b63ed789 Iustin Pop
        node_volume[node] = {}
982 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
983 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
984 a8083063 Iustin Pop
        bad = True
985 a8083063 Iustin Pop
        continue
986 b63ed789 Iustin Pop
      else:
987 25361b9a Iustin Pop
        node_volume[node] = lvdata
988 a8083063 Iustin Pop
989 a8083063 Iustin Pop
      # node_instance
990 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
991 25361b9a Iustin Pop
      if not isinstance(idata, list):
992 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
993 25361b9a Iustin Pop
                    (node,))
994 a8083063 Iustin Pop
        bad = True
995 a8083063 Iustin Pop
        continue
996 a8083063 Iustin Pop
997 25361b9a Iustin Pop
      node_instance[node] = idata
998 a8083063 Iustin Pop
999 9c9c7d30 Guido Trotter
      # node_info
1000 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1001 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1002 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1003 9c9c7d30 Guido Trotter
        bad = True
1004 9c9c7d30 Guido Trotter
        continue
1005 9c9c7d30 Guido Trotter
1006 9c9c7d30 Guido Trotter
      try:
1007 9c9c7d30 Guido Trotter
        node_info[node] = {
1008 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1009 25361b9a Iustin Pop
          "dfree": int(nresult[constants.NV_VGLIST][vg_name]),
1010 93e4c50b Guido Trotter
          "pinst": [],
1011 93e4c50b Guido Trotter
          "sinst": [],
1012 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1013 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1014 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1015 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1016 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1017 36e7da50 Guido Trotter
          # secondary.
1018 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1019 9c9c7d30 Guido Trotter
        }
1020 9c9c7d30 Guido Trotter
      except ValueError:
1021 9c9c7d30 Guido Trotter
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
1022 9c9c7d30 Guido Trotter
        bad = True
1023 9c9c7d30 Guido Trotter
        continue
1024 9c9c7d30 Guido Trotter
1025 a8083063 Iustin Pop
    node_vol_should = {}
1026 a8083063 Iustin Pop
1027 a8083063 Iustin Pop
    for instance in instancelist:
1028 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1029 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1030 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1031 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1032 c5705f58 Guido Trotter
      bad = bad or result
1033 832261fd Iustin Pop
      inst_nodes_offline = []
1034 a8083063 Iustin Pop
1035 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1036 a8083063 Iustin Pop
1037 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1038 26b6af5e Guido Trotter
1039 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1040 93e4c50b Guido Trotter
      if pnode in node_info:
1041 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1042 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1043 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1044 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1045 93e4c50b Guido Trotter
        bad = True
1046 93e4c50b Guido Trotter
1047 832261fd Iustin Pop
      if pnode in n_offline:
1048 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1049 832261fd Iustin Pop
1050 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1051 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1052 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1053 93e4c50b Guido Trotter
      # supported either.
1054 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1055 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1056 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1057 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1058 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1059 93e4c50b Guido Trotter
                    % instance)
1060 93e4c50b Guido Trotter
1061 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1062 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1063 3924700f Iustin Pop
1064 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1065 93e4c50b Guido Trotter
        if snode in node_info:
1066 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1067 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1068 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1069 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1070 0a66c968 Iustin Pop
        elif snode not in n_offline:
1071 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1072 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1073 832261fd Iustin Pop
          bad = True
1074 832261fd Iustin Pop
        if snode in n_offline:
1075 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1076 832261fd Iustin Pop
1077 832261fd Iustin Pop
      if inst_nodes_offline:
1078 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1079 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1080 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1081 832261fd Iustin Pop
        bad = True
1082 93e4c50b Guido Trotter
1083 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1084 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1085 a8083063 Iustin Pop
                                       feedback_fn)
1086 a8083063 Iustin Pop
    bad = bad or result
1087 a8083063 Iustin Pop
1088 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1089 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1090 a8083063 Iustin Pop
                                         feedback_fn)
1091 a8083063 Iustin Pop
    bad = bad or result
1092 a8083063 Iustin Pop
1093 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1094 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1095 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1096 e54c4c5e Guido Trotter
      bad = bad or result
1097 2b3b6ddd Guido Trotter
1098 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1099 2b3b6ddd Guido Trotter
    if i_non_redundant:
1100 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1101 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1102 2b3b6ddd Guido Trotter
1103 3924700f Iustin Pop
    if i_non_a_balanced:
1104 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1105 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1106 3924700f Iustin Pop
1107 0a66c968 Iustin Pop
    if n_offline:
1108 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1109 0a66c968 Iustin Pop
1110 34290825 Michael Hanselmann
    return not bad
1111 a8083063 Iustin Pop
1112 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase != constants.HOOKS_PHASE_POST:
      return None

    # Used to change hooks' output to proper indentation
    indent_re = re.compile('^', re.M)
    feedback_fn("* Hooks Results")
    if not hooks_results:
      feedback_fn("  - ERROR: general communication failure")
      return 1

    for node_name, res in hooks_results.items():
      node_header_pending = True
      if res.failed or res.data is False or not isinstance(res.data, list):
        if res.offline:
          # offline nodes neither warrant a warning nor a failure result
          continue
        feedback_fn("    Communication failure in hooks execution")
        lu_result = 1
        continue
      for script, hkr, output in res.data:
        if hkr != constants.HKR_FAIL:
          continue
        # The node header is only shown once, and only if there are
        # failing hooks on that node
        if node_header_pending:
          feedback_fn("  Node %s:" % node_name)
          node_header_pending = False
        feedback_fn("    ERROR: Script %s failed, output:" % script)
        feedback_fn("%s" % indent_re.sub('      ', output))
        lu_result = 1

    return lu_result
1160 d8fff41c Guido Trotter
1161 a8083063 Iustin Pop
1162 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # shared locks on all nodes and instances: we only read their state
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of four elements
    @return: a tuple of (list of nodes which are considered unreachable
        or returning invalid data, dict of node names to LVM error
        output, list of instance names having offline logical volumes,
        dict of instance names to lists of missing (node, volume) pairs)

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      # only running, network-mirrored instances are of interest here
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
        # a string result means an LVM error, there is no volume data to
        # process for this node (without this continue we would crash on
        # lvs.iteritems() below)
        continue
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1245 2c95a8d4 Iustin Pop
1246 2c95a8d4 Iustin Pop
1247 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    mn = self.cfg.GetMasterNode()
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    # the hooks run only on the master node
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)
    new_name = hostname.name
    self.ip = new_ip = hostname.ip

    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")

    # if the IP changes, nothing may already answer on the new address
    if new_ip != old_ip and utils.TcpPing(new_ip,
                                          constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                 " reachable on the network. Aborting." %
                                 new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    new_name = self.op.name
    new_ip = self.ip
    master = self.cfg.GetMasterNode()

    # shutdown the master IP
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = new_name
      cluster.master_ip = new_ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)
    finally:
      # always try to bring the master role back, even on failure
      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")
1325 07bd8a51 Iustin Pop
1326 07bd8a51 Iustin Pop
1327 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check whether the given disk or any of its children is lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  for chdisk in (disk.children or []):
    if _RecursiveCheckIfLVMBased(chdisk):
      return True
  return disk.dev_type == constants.LD_LV
1341 8084f9f6 Manuel Franceschini
1342 8084f9f6 Manuel Franceschini
1343 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckParameters(self):
    """Check parameters

    """
    # normalize the optional candidate_pool_size opcode field: absent
    # means None (no change); otherwise it must convert to an int >= 1
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    # the hooks run only on the master node
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if self.op.vg_name is not None and not self.op.vg_name:
      # an empty (but not None) vg_name means "disable lvm storage",
      # which is refused while any lvm-based instance disks exist
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.CheckBEParams(self.op.beparams)
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      # merge the per-hypervisor overrides into the current settings
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name != self.cfg.GetVGName():
        self.cfg.SetVGName(self.op.vg_name)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size

    self.cfg.Update(self.cluster)

    # we want to update nodes after the cluster so that if any errors
    # happen, we have recorded and saved the cluster info
    if self.op.candidate_pool_size is not None:
      _AdjustCandidatePool(self)
1479 4b7735f9 Iustin Pop
1480 8084f9f6 Manuel Franceschini
1481 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # lock all nodes, in shared mode
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    This LU has no prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    # simply re-save the current, unmodified cluster configuration
    cluster_info = self.cfg.GetClusterInfo()
    self.cfg.Update(cluster_info)
1506 afee0879 Iustin Pop
1507 afee0879 Iustin Pop
1508 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  The mirror status is queried on the instance's primary node; failed
  queries are retried up to 10 times (sleeping 6 seconds in between)
  before the function gives up.

  @param lu: the logical unit on whose behalf we execute (its cfg, rpc
      and proc members are used)
  @param instance: the instance whose disks should be polled
  @type oneshot: boolean
  @param oneshot: if True, only poll and report once instead of waiting
      until all disks have finished syncing
  @type unlock: boolean
  @param unlock: currently unused by this function
  @rtype: boolean
  @return: True if no disk was seen degraded without a sync percentage,
      False otherwise
  @raise errors.RemoteError: if the primary node cannot be contacted
      for mirror data after the retries are exhausted

  """
  # nothing to wait for on a diskless instance
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      # query failure: retry a limited number of times before aborting
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    # a successful query resets the retry counter
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # degraded with no sync percentage is accumulated into the final
      # (negative) result, as waiting longer will not fix it
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a sync percentage means this disk is still syncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # sleep roughly as long as the largest estimate, capped at a minute
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1564 a8083063 Iustin Pop
1565 a8083063 Iustin Pop
1566 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1567 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1568 a8083063 Iustin Pop

1569 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1570 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1571 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1572 0834c866 Iustin Pop

1573 a8083063 Iustin Pop
  """
1574 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1575 0834c866 Iustin Pop
  if ldisk:
1576 0834c866 Iustin Pop
    idx = 6
1577 0834c866 Iustin Pop
  else:
1578 0834c866 Iustin Pop
    idx = 5
1579 a8083063 Iustin Pop
1580 a8083063 Iustin Pop
  result = True
1581 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1582 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1583 23829f6f Iustin Pop
    msg = rstats.RemoteFailMsg()
1584 23829f6f Iustin Pop
    if msg:
1585 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
1586 23829f6f Iustin Pop
      result = False
1587 23829f6f Iustin Pop
    elif not rstats.payload:
1588 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
1589 a8083063 Iustin Pop
      result = False
1590 a8083063 Iustin Pop
    else:
1591 23829f6f Iustin Pop
      result = result and (not rstats.payload[idx])
1592 a8083063 Iustin Pop
  if dev.children:
1593 a8083063 Iustin Pop
    for child in dev.children:
1594 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1595 a8083063 Iustin Pop
1596 a8083063 Iustin Pop
  return result
1597 a8083063 Iustin Pop
1598 a8083063 Iustin Pop
1599 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit implementing the OS diagnose/query operation.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    # querying only a subset of the OSes is not implemented
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # we need to look at every node, so acquire all node locks, shared
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remap a per-node result list into a per-OS per-node dictionary.

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @returns: a dictionary with osnames as keys and as value another map,
        with nodes as keys and list of OS objects as values

    """
    per_os = {}
    for node_name, node_result in rlist.iteritems():
      if node_result.failed or not node_result.data:
        continue
      for os_obj in node_result.data:
        if os_obj.name not in per_os:
          # first time we see this OS: pre-seed an empty list for every
          # known node, so nodes missing the OS are still represented
          node_map = {}
          for nname in node_list:
            node_map[nname] = []
          per_os[os_obj.name] = node_map
        per_os[os_obj.name][node_name].append(os_obj)
    return per_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()
                   if node in locked_nodes]
    diag_result = self.rpc.call_os_diagnose(valid_nodes)
    if diag_result == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    os_map = self._DiagnoseByOS(valid_nodes, diag_result)
    output = []
    for os_name, node_oses in os_map.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in node_oses.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in node_oses.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1685 a8083063 Iustin Pop
1686 a8083063 Iustin Pop
1687 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # the removed node must not see the hooks for its own removal
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # fixed: was the deprecated statement form "raise Exc, args",
      # inconsistent with every other raise in this module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    # refuse removal while any instance still lives on the node
    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)
1754 eb1742d5 Guido Trotter
1755 a8083063 Iustin Pop
1756 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  Returns one row per selected node, with one column per requested
  output field.  Static fields come from the configuration; dynamic
  fields require a live RPC to the nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields that need a node_info RPC to the node itself
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  # fields answerable from the cluster configuration alone
  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    # empty names means "all nodes"
    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # node locks are only taken when dynamic fields are requested AND
    # the caller asked for locking
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      # with locking held, the acquired lock set is authoritative
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      # without locking a named node may have vanished meanwhile
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      # query the nodes themselves for the dynamic (live) fields
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          # missing/unparseable values are passed through as-is by
          # TryConvert, so entries may be None
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          # RPC failed: dynamic fields for this node will read as None
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    # the instance scan is only needed for the instance-related fields
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1909 a8083063 Iustin Pop
1910 a8083063 Iustin Pop
1911 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # lock the involved nodes in shared mode; an empty node list
    # means we look at all of them
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    node_names = self.nodes
    volumes = self.rpc.call_node_volumes(node_names)

    instances = [self.cfg.GetInstanceInfo(iname) for iname
                 in self.cfg.GetInstanceList()]

    # per-instance map of node name to the LVs it owns there
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    output = []
    for node_name in node_names:
      # skip nodes whose volume query failed or returned nothing
      if (node_name not in volumes or volumes[node_name].failed
          or not volumes[node_name].data):
        continue

      node_vols = sorted(volumes[node_name].data,
                         key=lambda vol: vol['dev'])

      for vol in node_vols:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node_name
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV on this node, if any
            val = '-'
            for inst in instances:
              inst_lvs = lv_by_node[inst]
              if node_name in inst_lvs and vol['name'] in inst_lvs[node_name]:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
1990 dcb93971 Michael Hanselmann
1991 dcb93971 Michael Hanselmann
1992 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1993 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1994 a8083063 Iustin Pop

1995 a8083063 Iustin Pop
  """
1996 a8083063 Iustin Pop
  HPATH = "node-add"
1997 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1998 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1999 a8083063 Iustin Pop
2000 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2001 a8083063 Iustin Pop
    """Build hooks env.
2002 a8083063 Iustin Pop

2003 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
2004 a8083063 Iustin Pop

2005 a8083063 Iustin Pop
    """
2006 a8083063 Iustin Pop
    env = {
2007 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2008 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
2009 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
2010 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
2011 a8083063 Iustin Pop
      }
2012 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
2013 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
2014 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
2015 a8083063 Iustin Pop
2016 a8083063 Iustin Pop
  def CheckPrereq(self):
2017 a8083063 Iustin Pop
    """Check prerequisites.
2018 a8083063 Iustin Pop

2019 a8083063 Iustin Pop
    This checks:
2020 a8083063 Iustin Pop
     - the new node is not already in the config
2021 a8083063 Iustin Pop
     - it is resolvable
2022 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
2023 a8083063 Iustin Pop

2024 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
2025 a8083063 Iustin Pop

2026 a8083063 Iustin Pop
    """
2027 a8083063 Iustin Pop
    node_name = self.op.node_name
2028 a8083063 Iustin Pop
    cfg = self.cfg
2029 a8083063 Iustin Pop
2030 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
2031 a8083063 Iustin Pop
2032 bcf043c9 Iustin Pop
    node = dns_data.name
2033 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
2034 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
2035 a8083063 Iustin Pop
    if secondary_ip is None:
2036 a8083063 Iustin Pop
      secondary_ip = primary_ip
2037 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
2038 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
2039 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
2040 e7c6e02b Michael Hanselmann
2041 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
2042 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
2043 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2044 e7c6e02b Michael Hanselmann
                                 node)
2045 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
2046 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2047 a8083063 Iustin Pop
2048 a8083063 Iustin Pop
    for existing_node_name in node_list:
2049 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
2050 e7c6e02b Michael Hanselmann
2051 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
2052 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
2053 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
2054 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2055 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
2056 e7c6e02b Michael Hanselmann
        continue
2057 e7c6e02b Michael Hanselmann
2058 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
2059 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
2060 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
2061 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
2062 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
2063 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
2064 a8083063 Iustin Pop
2065 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
2066 a8083063 Iustin Pop
    # same as for the master
2067 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2068 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
2069 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
2070 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
2071 a8083063 Iustin Pop
      if master_singlehomed:
2072 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
2073 3ecf6786 Iustin Pop
                                   " new node has one")
2074 a8083063 Iustin Pop
      else:
2075 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
2076 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
2077 a8083063 Iustin Pop
2078 a8083063 Iustin Pop
    # checks reachablity
2079 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2080 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
2081 a8083063 Iustin Pop
2082 a8083063 Iustin Pop
    if not newbie_singlehomed:
2083 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
2084 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2085 b15d625f Iustin Pop
                           source=myself.secondary_ip):
2086 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2087 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
2088 a8083063 Iustin Pop
2089 0fff97e9 Guido Trotter
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2090 ec0292f1 Iustin Pop
    mc_now, _ = self.cfg.GetMasterCandidateStats()
2091 ec0292f1 Iustin Pop
    master_candidate = mc_now < cp_size
2092 0fff97e9 Guido Trotter
2093 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
2094 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
2095 0fff97e9 Guido Trotter
                                 secondary_ip=secondary_ip,
2096 fc0fe88c Iustin Pop
                                 master_candidate=master_candidate,
2097 fc0fe88c Iustin Pop
                                 offline=False)
2098 a8083063 Iustin Pop
2099 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2100 a8083063 Iustin Pop
    """Adds the new node to the cluster.
2101 a8083063 Iustin Pop

2102 a8083063 Iustin Pop
    """
2103 a8083063 Iustin Pop
    new_node = self.new_node
2104 a8083063 Iustin Pop
    node = new_node.name
2105 a8083063 Iustin Pop
2106 a8083063 Iustin Pop
    # check connectivity
2107 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
2108 781de953 Iustin Pop
    result.Raise()
2109 781de953 Iustin Pop
    if result.data:
2110 781de953 Iustin Pop
      if constants.PROTOCOL_VERSION == result.data:
2111 9a4f63d1 Iustin Pop
        logging.info("Communication to node %s fine, sw version %s match",
2112 781de953 Iustin Pop
                     node, result.data)
2113 a8083063 Iustin Pop
      else:
2114 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
2115 3ecf6786 Iustin Pop
                                 " node version %s" %
2116 781de953 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result.data))
2117 a8083063 Iustin Pop
    else:
2118 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
2119 a8083063 Iustin Pop
2120 a8083063 Iustin Pop
    # setup ssh on node
2121 9a4f63d1 Iustin Pop
    logging.info("Copy ssh key to node %s", node)
2122 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2123 a8083063 Iustin Pop
    keyarray = []
2124 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2125 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2126 70d9e3d8 Iustin Pop
                priv_key, pub_key]
2127 a8083063 Iustin Pop
2128 a8083063 Iustin Pop
    for i in keyfiles:
2129 a8083063 Iustin Pop
      f = open(i, 'r')
2130 a8083063 Iustin Pop
      try:
2131 a8083063 Iustin Pop
        keyarray.append(f.read())
2132 a8083063 Iustin Pop
      finally:
2133 a8083063 Iustin Pop
        f.close()
2134 a8083063 Iustin Pop
2135 72737a7f Iustin Pop
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2136 72737a7f Iustin Pop
                                    keyarray[2],
2137 72737a7f Iustin Pop
                                    keyarray[3], keyarray[4], keyarray[5])
2138 a8083063 Iustin Pop
2139 a1b805fb Iustin Pop
    msg = result.RemoteFailMsg()
2140 a1b805fb Iustin Pop
    if msg:
2141 a1b805fb Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the"
2142 a1b805fb Iustin Pop
                               " new node: %s" % msg)
2143 a8083063 Iustin Pop
2144 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
2145 d9c02ca6 Michael Hanselmann
    utils.AddHostToEtcHosts(new_node.name)
2146 c8a0948f Michael Hanselmann
2147 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
2148 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
2149 781de953 Iustin Pop
                                                 new_node.secondary_ip)
2150 781de953 Iustin Pop
      if result.failed or not result.data:
2151 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2152 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
2153 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
2154 a8083063 Iustin Pop
2155 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
2156 5c0527ed Guido Trotter
    node_verify_param = {
2157 5c0527ed Guido Trotter
      'nodelist': [node],
2158 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
2159 5c0527ed Guido Trotter
    }
2160 5c0527ed Guido Trotter
2161 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2162 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
2163 5c0527ed Guido Trotter
    for verifier in node_verify_list:
2164 f08ce603 Guido Trotter
      if result[verifier].failed or not result[verifier].data:
2165 5c0527ed Guido Trotter
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
2166 5c0527ed Guido Trotter
                                 " for remote verification" % verifier)
2167 781de953 Iustin Pop
      if result[verifier].data['nodelist']:
2168 781de953 Iustin Pop
        for failed in result[verifier].data['nodelist']:
2169 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
2170 bafc1d90 Iustin Pop
                      (verifier, result[verifier].data['nodelist'][failed]))
2171 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
2172 ff98055b Iustin Pop
2173 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
2174 a8083063 Iustin Pop
    # including the node just added
2175 d6a02168 Michael Hanselmann
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
2176 102b115b Michael Hanselmann
    dist_nodes = self.cfg.GetNodeList()
2177 102b115b Michael Hanselmann
    if not self.op.readd:
2178 102b115b Michael Hanselmann
      dist_nodes.append(node)
2179 a8083063 Iustin Pop
    if myself.name in dist_nodes:
2180 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
2181 a8083063 Iustin Pop
2182 9a4f63d1 Iustin Pop
    logging.debug("Copying hosts and known_hosts to all nodes")
2183 107711b0 Michael Hanselmann
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
2184 72737a7f Iustin Pop
      result = self.rpc.call_upload_file(dist_nodes, fname)
2185 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
2186 ec85e3d5 Iustin Pop
        if to_result.failed or not to_result.data:
2187 9a4f63d1 Iustin Pop
          logging.error("Copy of file %s to node %s failed", fname, to_node)
2188 a8083063 Iustin Pop
2189 d6a02168 Michael Hanselmann
    to_copy = []
2190 2928f08d Guido Trotter
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
2191 2928f08d Guido Trotter
    if constants.HTS_USE_VNC.intersection(enabled_hypervisors):
2192 2a6469d5 Alexander Schreiber
      to_copy.append(constants.VNC_PASSWORD_FILE)
2193 2928f08d Guido Trotter
2194 a8083063 Iustin Pop
    for fname in to_copy:
2195 72737a7f Iustin Pop
      result = self.rpc.call_upload_file([node], fname)
2196 781de953 Iustin Pop
      if result[node].failed or not result[node]:
2197 9a4f63d1 Iustin Pop
        logging.error("Could not copy file %s to node %s", fname, node)
2198 a8083063 Iustin Pop
2199 d8470559 Michael Hanselmann
    if self.op.readd:
2200 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
2201 d8470559 Michael Hanselmann
    else:
2202 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
2203 a8083063 Iustin Pop
2204 a8083063 Iustin Pop
2205 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    # canonicalize the node name first, so every later phase sees it
    resolved_name = self.cfg.ExpandNodeName(self.op.node_name)
    if resolved_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = resolved_name
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    if self.op.master_candidate is None and self.op.offline is None:
      raise errors.OpPrereqError("Please pass at least one modification")
    if self.op.offline == True and self.op.master_candidate == True:
      # an offline node cannot take part in the master candidate pool
      raise errors.OpPrereqError("Can't set the node into offline and"
                                 " master_candidate at the same time")

  def ExpandNames(self):
    # only the node being modified needs to be locked
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      }
    nl = [self.cfg.GetMasterNode(), self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    demoting = (self.op.master_candidate == False or self.op.offline == True)
    if demoting and node.master_candidate:
      # we are about to demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate and online")
      pool_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= pool_size:
        # dropping below the configured candidate pool size; only allowed
        # with --force, otherwise it is a prereq failure
        warn_text = ("Not enough master candidates (desired"
                     " %d, new value will be %d)" %
                     (pool_size, num_candidates - 1))
        if self.op.force:
          self.LogWarning(warn_text)
        else:
          raise errors.OpPrereqError(warn_text)

    if (self.op.master_candidate == True and node.offline and
        not self.op.offline == False):
      # promoting an offline node is only valid if it is being onlined too
      raise errors.OpPrereqError("Can't set an offline node to"
                                 " master_candidate")

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    changes = []

    if self.op.offline is not None:
      node.offline = self.op.offline
      changes.append(("offline", str(self.op.offline)))
      if self.op.offline == True and node.master_candidate:
        # going offline implies losing master candidacy
        node.master_candidate = False
        changes.append(("master_candidate", "auto-demotion due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changes.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        # tell the node itself to drop its candidate state; best-effort
        demote_result = self.rpc.call_node_demote_from_mc(node.name)
        msg = demote_result.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if self.op.node_name != self.cfg.GetMasterNode():
      self.context.ReaddNode(node)

    return changes
2308 b31c8676 Iustin Pop
2309 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # a read-only config query needs no locks at all
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequsites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    # only expose hvparams for hypervisors which are actually enabled
    hv_params = dict((hv_name, cluster.hvparams[hv_name])
                     for hv_name in cluster.enabled_hypervisors)
    return {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": hv_params,
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      }
2348 a8083063 Iustin Pop
2349 a8083063 Iustin Pop
2350 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  # no dynamic (per-node) fields for this query
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    self.needed_locks = {}

    # validate the requested fields against the declared field sets
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    answers = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        answers.append(self.cfg.GetClusterName())
      elif field == "master_node":
        answers.append(self.cfg.GetMasterNode())
      elif field == "drain_flag":
        # the queue is drained iff the drain marker file exists
        answers.append(os.path.exists(constants.JOB_QUEUE_DRAIN_FILE))
      else:
        raise errors.ParameterError(field)
    return answers
2388 a8083063 Iustin Pop
2389 a8083063 Iustin Pop
2390 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are filled in later, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    ok_flag, dev_info = _AssembleInstanceDisks(self, self.instance)
    if not ok_flag:
      raise errors.OpExecError("Cannot activate block devices")
    return dev_info
2426 a8083063 Iustin Pop
2427 a8083063 Iustin Pop
2428 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  dev_info = []
  all_ok = True
  inst_name = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for idisk in instance.disks:
    for node, dev in idisk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(dev, node)
      result = lu.rpc.call_blockdev_assemble(node, dev, inst_name, False)
      if result.failed or not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1)",
                           idisk.iv_name, node)
        if not ignore_secondaries:
          all_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for idisk in instance.disks:
    for node, dev in idisk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(dev, node)
      result = lu.rpc.call_blockdev_assemble(node, dev, inst_name, True)
      if result.failed or not result:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2)",
                           idisk.iv_name, node)
        all_ok = False
    # NOTE: 'result' here is the last assemble result of the inner loop;
    # the append intentionally sits outside the per-node loop
    dev_info.append((instance.primary_node, idisk.iv_name, result.data))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return all_ok, dev_info
2492 a8083063 Iustin Pop
2493 a8083063 Iustin Pop
2494 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  Assembles the instance's disks (errors on secondaries are ignored
  when force is true); on failure, tears the disks back down and
  raises an error.

  """
  ok_flag, _ = _AssembleInstanceDisks(lu, instance,
                                      ignore_secondaries=force)
  if not ok_flag:
    # roll back whatever was brought up before bailing out
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")
2507 fe7b0351 Michael Hanselmann
2508 fe7b0351 Michael Hanselmann
2509 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are filled in later, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    _SafeShutdownInstanceDisks(self, self.instance)
2541 a8083063 Iustin Pop
2542 a8083063 Iustin Pop
2543 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  pnode = instance.primary_node
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  if ins_l.failed or not isinstance(ins_l.data, list):
    raise errors.OpExecError("Can't contact node '%s'" % pnode)

  if instance.name in ins_l.data:
    # refuse to pull the disks from under a live instance
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2562 a8083063 Iustin Pop
2563 a8083063 Iustin Pop
2564 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored
  (only failures on non-primary nodes make the result False).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we shut down
  @type ignore_primary: boolean
  @param ignore_primary: whether errors on the primary node are ignored
  @return: True if all (counted) shutdowns succeeded, False otherwise

  """
  # use a dedicated aggregate flag: the original code reused 'result' for
  # both the per-call RPC answer and the overall status, so an early
  # failure was silently overwritten by the next successful call
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      if result.failed or not result.data:
        logging.error("Could not shutdown block device %s on node %s",
                      disk.iv_name, node)
        # a primary-node failure is only forgiven when ignore_primary is set
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result
2584 a8083063 Iustin Pop
2585 a8083063 Iustin Pop
2586 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function check if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raise an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_data = lu.rpc.call_node_info([node], lu.cfg.GetVGName(),
                                    hypervisor_name)
  node_data[node].Raise()
  avail_mem = node_data[node].data.get('memory_free')
  # a non-integer answer means the node could not report its free memory
  if not isinstance(avail_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, avail_mem))
  if requested > avail_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, avail_mem))
2618 d4f16fd9 Iustin Pop
2619 d4f16fd9 Iustin Pop
2620 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    be_params = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existance
    _CheckInstanceBridgesExist(self, instance)

    # make sure the primary can actually host the instance's memory
    _CheckNodeFreeMemory(self, instance.primary_node,
                         "starting instance %s" % instance.name,
                         be_params[constants.BE_MEMORY],
                         instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    # extra_args is optional on the opcode, default to empty
    extra_args = getattr(self.op, "extra_args", "")

    self.cfg.MarkInstanceUp(instance.name)

    primary = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(primary, instance, extra_args)
    msg = result.RemoteFailMsg()
    if msg:
      # the start failed: tear the disks back down before reporting
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)
2684 a8083063 Iustin Pop
2685 a8083063 Iustin Pop
2686 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the reboot type and acquire the instance lock."""
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existance
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboots are handled entirely by the node daemon
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type, extra_args)
      if result.failed or not result.data:
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: shut the instance (and its disks) down completely
      # and start it from scratch
      result = self.rpc.call_instance_shutdown(node_current, instance)
      # FIX: previously this tested the truth value of the RpcResult
      # object itself (always true), so shutdown failures were never
      # detected; check failed/data like all other call sites
      if result.failed or not result.data:
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, extra_args)
      msg = result.RemoteFailMsg()
      if msg:
        # deactivate the disks again before reporting the failure
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)
2763 bf6929a2 Alexander Schreiber
2764 bf6929a2 Alexander Schreiber
2765 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire the lock on the target instance."""
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    inst = self.instance
    # record the new administrative state before contacting the node
    self.cfg.MarkInstanceDown(inst.name)
    result = self.rpc.call_instance_shutdown(inst.primary_node, inst)
    if result.failed or not result.data:
      # best-effort: a failed shutdown is only a warning, the disks are
      # deactivated anyway
      self.proc.LogWarning("Could not shutdown instance")

    _ShutdownInstanceDisks(self, inst)
2810 a8083063 Iustin Pop
2811 a8083063 Iustin Pop
2812 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire the lock on the target instance."""
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node that the instance is really down
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    if remote_info.failed or remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: this used self.op.pnode, which does not exist for this
        # opcode and would have raised AttributeError instead of the
        # intended OpPrereqError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      # always deactivate the disks, even if the OS creation failed
      _ShutdownInstanceDisks(self, inst)
2897 fe7b0351 Michael Hanselmann
2898 fe7b0351 Michael Hanselmann
2899 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node that the instance is really down
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    # unless told otherwise, check that the new name's IP is not alive
    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      # remember the old file storage dir before the config is updated
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      # the file storage directory embeds the instance name, so it has
      # to be renamed on the primary node as well
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        # a rename script failure is not fatal: the instance itself has
        # already been renamed in the configuration, so only warn
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
3008 decd5f45 Iustin Pop
3009 decd5f45 Iustin Pop
3010 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    """Lock the instance; its node locks are computed later."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the locks on the instance's nodes."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    # only the master runs the post-removal hooks
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    inst = self.instance
    logging.info("Shutting down instance %s on node %s",
                 inst.name, inst.primary_node)

    result = self.rpc.call_instance_shutdown(inst.primary_node, inst)
    if result.failed or not result.data:
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (inst.name, inst.primary_node))
      feedback_fn("Warning: can't shutdown instance")

    logging.info("Removing block devices for instance %s", inst.name)

    if not _RemoveDisks(self, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", inst.name)

    self.cfg.RemoveInstance(inst.name)
    # drop the now-stale lock on the removed instance
    self.remove_locks[locking.LEVEL_INSTANCE] = inst.name
3076 a8083063 Iustin Pop
3077 a8083063 Iustin Pop
3078 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
3079 a8083063 Iustin Pop
  """Logical unit for querying instances.
3080 a8083063 Iustin Pop

3081 a8083063 Iustin Pop
  """
3082 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
3083 7eb9d8f7 Guido Trotter
  REQ_BGL = False
3084 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
3085 a2d2e1a7 Iustin Pop
                                    "admin_state", "admin_ram",
3086 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
3087 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
3088 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
3089 a2d2e1a7 Iustin Pop
                                    "(disk).(size)/([0-9]+)",
3090 024e157f Iustin Pop
                                    "(disk).(sizes)", "disk_usage",
3091 a2d2e1a7 Iustin Pop
                                    "(nic).(mac|ip|bridge)/([0-9]+)",
3092 a2d2e1a7 Iustin Pop
                                    "(nic).(macs|ips|bridges)",
3093 a2d2e1a7 Iustin Pop
                                    "(disk|nic).(count)",
3094 a2d2e1a7 Iustin Pop
                                    "serial_no", "hypervisor", "hvparams",] +
3095 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
3096 a2d2e1a7 Iustin Pop
                                   for name in constants.HVS_PARAMETERS] +
3097 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
3098 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
3099 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
3100 31bf511f Iustin Pop
3101 a8083063 Iustin Pop
3102 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
    """Compute the wanted instance list and the needed locks."""
    # validate the requested fields against the known field sets
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    # a query only ever needs shared (read) access
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed if we must contact the nodes (a dynamic
    # field was requested) and the caller asked for it
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3122 7eb9d8f7 Guido Trotter
3123 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
    """Acquire node locks, but only if instance locking was requested."""
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()
3126 7eb9d8f7 Guido Trotter
3127 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
    """Check prerequisites.

    """
    # all the verification is done at expansion/locking time
    pass
3132 069dcc86 Iustin Pop
3133 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3134 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
3135 a8083063 Iustin Pop

3136 a8083063 Iustin Pop
    """
3137 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
3138 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
3139 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
3140 a7f5dc98 Iustin Pop
      if self.do_locking:
3141 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
3142 a7f5dc98 Iustin Pop
      else:
3143 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
3144 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
3145 57a2fb91 Iustin Pop
    else:
3146 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
3147 a7f5dc98 Iustin Pop
      if self.do_locking:
3148 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
3149 a7f5dc98 Iustin Pop
      else:
3150 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
3151 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
3152 a7f5dc98 Iustin Pop
      if missing:
3153 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
3154 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
3155 a7f5dc98 Iustin Pop
      instance_names = self.wanted
3156 c1f1cbb2 Iustin Pop
3157 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
3158 a8083063 Iustin Pop
3159 a8083063 Iustin Pop
    # begin data gathering
3160 a8083063 Iustin Pop
3161 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
3162 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
3163 a8083063 Iustin Pop
3164 a8083063 Iustin Pop
    bad_nodes = []
3165 cbfc4681 Iustin Pop
    off_nodes = []
3166 ec79568d Iustin Pop
    if self.do_node_query:
3167 a8083063 Iustin Pop
      live_data = {}
3168 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
3169 a8083063 Iustin Pop
      for name in nodes:
3170 a8083063 Iustin Pop
        result = node_data[name]
3171 cbfc4681 Iustin Pop
        if result.offline:
3172 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
3173 cbfc4681 Iustin Pop
          off_nodes.append(name)
3174 781de953 Iustin Pop
        if result.failed:
3175 a8083063 Iustin Pop
          bad_nodes.append(name)
3176 781de953 Iustin Pop
        else:
3177 781de953 Iustin Pop
          if result.data:
3178 781de953 Iustin Pop
            live_data.update(result.data)
3179 781de953 Iustin Pop
            # else no instance is alive
3180 a8083063 Iustin Pop
    else:
3181 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
3182 a8083063 Iustin Pop
3183 a8083063 Iustin Pop
    # end data gathering
3184 a8083063 Iustin Pop
3185 5018a335 Iustin Pop
    HVPREFIX = "hv/"
3186 338e51e8 Iustin Pop
    BEPREFIX = "be/"
3187 a8083063 Iustin Pop
    output = []
3188 a8083063 Iustin Pop
    for instance in instance_list:
3189 a8083063 Iustin Pop
      iout = []
3190 5018a335 Iustin Pop
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
3191 338e51e8 Iustin Pop
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
3192 a8083063 Iustin Pop
      for field in self.op.output_fields:
3193 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
3194 a8083063 Iustin Pop
        if field == "name":
3195 a8083063 Iustin Pop
          val = instance.name
3196 a8083063 Iustin Pop
        elif field == "os":
3197 a8083063 Iustin Pop
          val = instance.os
3198 a8083063 Iustin Pop
        elif field == "pnode":
3199 a8083063 Iustin Pop
          val = instance.primary_node
3200 a8083063 Iustin Pop
        elif field == "snodes":
3201 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
3202 a8083063 Iustin Pop
        elif field == "admin_state":
3203 0d68c45d Iustin Pop
          val = instance.admin_up
3204 a8083063 Iustin Pop
        elif field == "oper_state":
3205 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
3206 8a23d2d3 Iustin Pop
            val = None
3207 a8083063 Iustin Pop
          else:
3208 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
3209 d8052456 Iustin Pop
        elif field == "status":
3210 cbfc4681 Iustin Pop
          if instance.primary_node in off_nodes:
3211 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
3212 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
3213 d8052456 Iustin Pop
            val = "ERROR_nodedown"
3214 d8052456 Iustin Pop
          else:
3215 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
3216 d8052456 Iustin Pop
            if running:
3217 0d68c45d Iustin Pop
              if instance.admin_up:
3218 d8052456 Iustin Pop
                val = "running"
3219 d8052456 Iustin Pop
              else:
3220 d8052456 Iustin Pop
                val = "ERROR_up"
3221 d8052456 Iustin Pop
            else:
3222 0d68c45d Iustin Pop
              if instance.admin_up:
3223 d8052456 Iustin Pop
                val = "ERROR_down"
3224 d8052456 Iustin Pop
              else:
3225 d8052456 Iustin Pop
                val = "ADMIN_down"
3226 a8083063 Iustin Pop
        elif field == "oper_ram":
3227 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
3228 8a23d2d3 Iustin Pop
            val = None
3229 a8083063 Iustin Pop
          elif instance.name in live_data:
3230 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
3231 a8083063 Iustin Pop
          else:
3232 a8083063 Iustin Pop
            val = "-"
3233 a8083063 Iustin Pop
        elif field == "disk_template":
3234 a8083063 Iustin Pop
          val = instance.disk_template
3235 a8083063 Iustin Pop
        elif field == "ip":
3236 a8083063 Iustin Pop
          val = instance.nics[0].ip
3237 a8083063 Iustin Pop
        elif field == "bridge":
3238 a8083063 Iustin Pop
          val = instance.nics[0].bridge
3239 a8083063 Iustin Pop
        elif field == "mac":
3240 a8083063 Iustin Pop
          val = instance.nics[0].mac
3241 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
3242 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
3243 ad24e046 Iustin Pop
          try:
3244 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
3245 ad24e046 Iustin Pop
          except errors.OpPrereqError:
3246 8a23d2d3 Iustin Pop
            val = None
3247 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
3248 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
3249 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
3250 130a6a6f Iustin Pop
        elif field == "tags":
3251 130a6a6f Iustin Pop
          val = list(instance.GetTags())
3252 38d7239a Iustin Pop
        elif field == "serial_no":
3253 38d7239a Iustin Pop
          val = instance.serial_no
3254 5018a335 Iustin Pop
        elif field == "network_port":
3255 5018a335 Iustin Pop
          val = instance.network_port
3256 338e51e8 Iustin Pop
        elif field == "hypervisor":
3257 338e51e8 Iustin Pop
          val = instance.hypervisor
3258 338e51e8 Iustin Pop
        elif field == "hvparams":
3259 338e51e8 Iustin Pop
          val = i_hv
3260 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
3261 5018a335 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
3262 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
3263 338e51e8 Iustin Pop
        elif field == "beparams":
3264 338e51e8 Iustin Pop
          val = i_be
3265 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
3266 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
3267 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
3268 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
3269 71c1af58 Iustin Pop
          # matches a variable list
3270 71c1af58 Iustin Pop
          st_groups = st_match.groups()
3271 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
3272 71c1af58 Iustin Pop
            if st_groups[1] == "count":
3273 71c1af58 Iustin Pop
              val = len(instance.disks)
3274 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
3275 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
3276 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
3277 3e0cea06 Iustin Pop
              try:
3278 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
3279 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
3280 71c1af58 Iustin Pop
                val = None
3281 71c1af58 Iustin Pop
            else:
3282 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
3283 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
3284 71c1af58 Iustin Pop
            if st_groups[1] == "count":
3285 71c1af58 Iustin Pop
              val = len(instance.nics)
3286 41a776da Iustin Pop
            elif st_groups[1] == "macs":
3287 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
3288 41a776da Iustin Pop
            elif st_groups[1] == "ips":
3289 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
3290 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
3291 41a776da Iustin Pop
              val = [nic.bridge for nic in instance.nics]
3292 71c1af58 Iustin Pop
            else:
3293 71c1af58 Iustin Pop
              # index-based item
3294 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
3295 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
3296 71c1af58 Iustin Pop
                val = None
3297 71c1af58 Iustin Pop
              else:
3298 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
3299 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
3300 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
3301 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
3302 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
3303 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].bridge
3304 71c1af58 Iustin Pop
                else:
3305 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
3306 71c1af58 Iustin Pop
          else:
3307 71c1af58 Iustin Pop
            assert False, "Unhandled variable parameter"
3308 a8083063 Iustin Pop
        else:
3309 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
3310 a8083063 Iustin Pop
        iout.append(val)
3311 a8083063 Iustin Pop
      output.append(iout)
3312 a8083063 Iustin Pop
3313 a8083063 Iustin Pop
    return output
3314 a8083063 Iustin Pop
3315 a8083063 Iustin Pop
3316 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  The instance is shut down on its primary node and started on its
  (single) secondary node; only network-mirrored disk templates can be
  failed over.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  # node locks are computed dynamically in ExpandNames/DeclareLocks
  REQ_BGL = False

  def ExpandNames(self):
    # Lock the instance itself; its node locks cannot be computed yet,
    # so they are declared empty here and filled in by DeclareLocks.
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # Acquire the locks on the instance's nodes only when the node
    # locking level is reached.
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    # the instance is already locked (see ExpandNames), so a None
    # result here would be a programming error, hence the assert
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    # failover always targets the first (and only) secondary node
    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                         instance.name, bep[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence on the target node
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # a degraded disk only aborts the failover for an instance that
        # is marked up, unless the user chose to ignore consistency
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    if result.failed or not result.data:
      # a failed shutdown is fatal unless the user asked to ignore
      # consistency (e.g. because the source node is dead)
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding"
                             " anyway. Please make sure node %s is down",
                             instance.name, source_node, source_node)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        # clean up the partially-activated disks before giving up
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
3445 a8083063 Iustin Pop
3446 a8083063 Iustin Pop
3447 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
3448 53c776b5 Iustin Pop
  """Migrate an instance.
3449 53c776b5 Iustin Pop

3450 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
3451 53c776b5 Iustin Pop
  which is done with shutdown.
3452 53c776b5 Iustin Pop

3453 53c776b5 Iustin Pop
  """
3454 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
3455 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3456 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
3457 53c776b5 Iustin Pop
3458 53c776b5 Iustin Pop
  REQ_BGL = False
3459 53c776b5 Iustin Pop
3460 53c776b5 Iustin Pop
  def ExpandNames(self):
    # Lock the instance itself; the node locks cannot be computed yet
    # (they depend on the instance's nodes), so declare them empty here
    # and let DeclareLocks fill them in via _LockInstancesNodes.
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3464 53c776b5 Iustin Pop
3465 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
    # Acquire the locks on the instance's primary and secondary nodes
    # only when the node locking level is reached.
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
3468 53c776b5 Iustin Pop
3469 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    # the hooks run on the master node plus the instance's secondaries
    nl = [self.cfg.GetMasterNode()]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl
3478 53c776b5 Iustin Pop
3479 53c776b5 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    # The instance was already expanded and locked by ExpandNames, so
    # retrieve it directly and treat a miss as a programming error;
    # this matches the pattern used by LUFailoverInstance.
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # the migration target must be online; otherwise the memory and
    # bridge checks below (and the migration itself) cannot work
    _CheckNodeOnline(self, target_node)
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence on the target node
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      # ask the hypervisor on the primary node whether live migration
      # of this instance is possible at all
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance
3525 53c776b5 Iustin Pop
3526 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    """
    self.feedback_fn("* wait until resync is done")
    while True:
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      # track completion and the slowest node's progress in this round
      min_percent = 100
      all_done = True
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        node_done, node_percent = nres.payload
        if not node_done:
          all_done = False
        if node_percent is not None and node_percent < min_percent:
          min_percent = node_percent
      if all_done:
        break
      if min_percent < 100:
        self.feedback_fn("   - progress: %.1f%%" % min_percent)
      time.sleep(2)
3553 53c776b5 Iustin Pop
3554 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    # point the disk IDs at the node we are about to demote
    for disk in self.instance.disks:
      self.cfg.SetDiskID(disk, node)

    msg = self.rpc.call_blockdev_close(node, self.instance.name,
                                       self.instance.disks).RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))
3569 53c776b5 Iustin Pop
3570 53c776b5 Iustin Pop
  def _GoStandalone(self):
    """Disconnect from the network.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    # every node must confirm the disconnect, else abort
    for node_name, node_result in result.items():
      msg = node_result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks node %s,"
                                 " error %s" % (node_name, msg))
3582 53c776b5 Iustin Pop
3583 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    """
    # dual-master is needed while the instance runs on both nodes
    # during a live migration; single-master is the normal mode
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    # every node must confirm the reconfiguration, else abort
    for node_name, node_result in result.items():
      fail_msg = node_result.RemoteFailMsg()
      if fail_msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node_name, fail_msg))
3600 53c776b5 Iustin Pop
3601 53c776b5 Iustin Pop
  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    # running on both or on neither node is ambiguous and requires
    # manual intervention, so bail out with instructions
    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    # demote whichever node the instance is NOT running on, then bring
    # the disks back into a clean single-master configuration
    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore here errors, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")
3667 53c776b5 Iustin Pop
3668 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
3669 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
3670 6906a9d8 Guido Trotter

3671 6906a9d8 Guido Trotter
    """
3672 6906a9d8 Guido Trotter
    target_node = self.target_node
3673 6906a9d8 Guido Trotter
    try:
3674 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
3675 6906a9d8 Guido Trotter
      self._GoStandalone()
3676 6906a9d8 Guido Trotter
      self._GoReconnect(False)
3677 6906a9d8 Guido Trotter
      self._WaitUntilSync()
3678 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
3679 6906a9d8 Guido Trotter
      self.LogWarning("Migration failed and I can't reconnect the"
3680 6906a9d8 Guido Trotter
                      " drives: error '%s'\n"
3681 6906a9d8 Guido Trotter
                      "Please look and recover the instance status" %
3682 6906a9d8 Guido Trotter
                      str(err))
3683 6906a9d8 Guido Trotter
3684 6906a9d8 Guido Trotter
  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    Tells the target node to finalize (discard) the started migration;
    a failure is only logged, since the disk status must be reverted
    afterwards in any case.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      # pass the arguments lazily instead of pre-formatting with '%',
      # as done by the other logging calls in this file
      logging.error("Aborting migration failed on target node %s: %s",
                    target_node, abort_msg)
      # Don't raise an exception here, as we still have to try to revert
      # the disk status, even if this step failed.
3702 6906a9d8 Guido Trotter
3703 53c776b5 Iustin Pop
  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # refuse to migrate on top of degraded disks: the target's copy must be
    # fully in sync before the memory state is moved over
    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    # stored on self as well so that _AbortMigration can reuse it on failure
    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      # roll back: abort on the target and put the disks back into
      # single-master mode before failing the opcode
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    # NOTE(review): fixed 10s sleeps before and after the migration call;
    # no rationale is visible here -- presumably to let the hypervisor
    # settle, confirm before changing
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    # the instance now runs on target_node; record and propagate the change
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      # past the point of no return: the instance has already moved, so only
      # report the finalization failure instead of reverting the disks
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    # demote the old primary to secondary and go back to single-master mode
    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")
  def Exec(self, feedback_fn):
    """Perform the migration, or the cleanup of a failed one.

    """
    self.feedback_fn = feedback_fn

    source = self.instance.primary_node
    target = self.instance.secondary_nodes[0]
    self.source_node = source
    self.target_node = target
    self.all_nodes = [source, target]

    # map each involved node to its secondary (replication) IP
    self.nodes_ip = {}
    for node in (source, target):
      self.nodes_ip[node] = self.cfg.GetNodeInfo(node).secondary_ip

    if self.op.cleanup:
      return self._ExecCleanup()
    return self._ExecMigration()
def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Recursively create a block device and its children on a node.

  Children are always processed first, inheriting the (possibly
  upgraded) force_create flag; the device itself is created only when
  that flag ends up true.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; it is
      upgraded to True for the whole subtree as soon as a device with
      the CreateOnSecondary() attribute is found
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  for child in (device.children or []):
    _CreateBlockDev(lu, node, instance, child, force_create,
                    info, force_open)

  if force_create:
    _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create one block device on a given node, without recursing.

  Any children of the device must already have been created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution

  """
  lu.cfg.SetDiskID(device, node)
  creation = lu.rpc.call_blockdev_create(node, device, device.size,
                                         instance.name, force_open, info)
  fail_msg = creation.RemoteFailMsg()
  if fail_msg:
    raise errors.OpExecError("Can't create block device %s on"
                             " node %s for instance %s: %s" %
                             (device, node, instance.name, fail_msg))
  if device.physical_id is None:
    # remember the physical id the node assigned to the new device
    device.physical_id = creation.payload
def _GenerateUniqueNames(lu, exts):
  """Generate suitable LV names.

  For each extension in exts, a fresh unique ID is allocated from the
  configuration and the extension is appended to it.

  """
  return ["%s%s" % (lu.cfg.GenerateUniqueID(), val) for val in exts]
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Build a complete drbd8 device, with its data and meta LV children.

  names[0] is used for the data LV, names[1] for the metadata LV; the
  port, VG name and shared secret are allocated from the configuration.

  """
  drbd_port = lu.cfg.AllocatePort()
  vg = lu.cfg.GetVGName()
  secret = lu.cfg.GenerateDRBDSecret()
  data_child = objects.Disk(dev_type=constants.LD_LV, size=size,
                            logical_id=(vg, names[0]))
  # the metadata volume has a fixed size of 128 MB
  meta_child = objects.Disk(dev_type=constants.LD_LV, size=128,
                            logical_id=(vg, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, drbd_port,
                                  p_minor, s_minor,
                                  secret),
                      children=[data_child, meta_child],
                      iv_name=iv_name)
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  @param lu: the logical unit on whose behalf we execute
  @param template_name: one of the constants.DT_* disk template types
  @param instance_name: the name of the instance owning the disks
  @param primary_node: the primary node of the instance
  @param secondary_nodes: list of secondary nodes; must be empty for
      non-mirrored templates and contain exactly one node for DRBD8
  @param disk_info: list of dicts, each with "size" and "mode" keys,
      one per disk to generate
  @param file_storage_dir: the directory used for file-based disks
  @param file_driver: the driver used for file-based disks
  @param base_index: offset added to each disk's index when building its
      "disk/N" iv_name (non-zero when adding disks to an existing instance)
  @return: the list of generated L{objects.Disk} objects

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # minors are allocated in (primary, secondary) pairs, one pair per disk
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    # each DRBD disk needs two LVs: one for data and one for metadata
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         idx)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
def _GetInstanceInfoText(instance):
  """Return the text to attach to an instance's disks' metadata.

  """
  return "originstname+" + instance.name
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @raise errors.OpExecError: if disk or directory creation fails

  """
  info_text = _GetInstanceInfoText(instance)
  primary = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    # the storage directory must exist on the primary before any disks can
    # be created in it
    storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(primary, storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % primary)

    if not result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               storage_dir)

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for disk in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 disk.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      on_primary = (node == primary)
      _CreateBlockDev(lu, node, instance, disk, on_primary, info_text,
                      on_primary)
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  # Use a dedicated flag for the overall outcome; previously `result` was
  # reused for both the rpc result and the return value, so an earlier
  # failure was overwritten by the next rpc call and the function could
  # return an rpc result object instead of a boolean.
  all_result = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      result = lu.rpc.call_blockdev_remove(node, disk)
      if result.failed or not result.data:
        # best-effort: log and keep removing the remaining devices
        lu.proc.LogWarning("Could not remove block device %s on node %s,"
                           " continuing anyway", device.iv_name, node)
        all_result = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                 file_storage_dir)
    if result.failed or not result.data:
      logging.error("Could not remove directory '%s'", file_storage_dir)
      all_result = False

  return all_result
def _ComputeDiskSize(disk_template, disks):
  """Compute the free space required in the volume group for the disks.

  """
  # required free space, per disk template
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(d["size"] for d in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
    constants.DT_FILE: None,
  }

  try:
    return req_size_dict[disk_template]
  except KeyError:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" %  disk_template)
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstract the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  results = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                   hvname,
                                                   hvparams)
  for node in nodenames:
    node_info = results[node]
    if node_info.offline:
      # offline nodes cannot validate anything, so skip them
      continue
    fail_msg = node_info.RemoteFailMsg()
    if fail_msg:
      raise errors.OpPrereqError("Hypervisor parameter validation failed:"
                                 " %s" % fail_msg)
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present on self.op before this LU
  # runs — presumably enforced by the LogicalUnit base machinery; the
  # optional ones (pnode, snode, iallocator, hypervisor, ...) are
  # defaulted in ExpandNames instead
  _OP_REQP = ["instance_name", "disks", "disk_template",
              "mode", "start",
              "wait_for_sync", "ip_check", "nics",
              "hvparams", "beparams"]
  # locks are declared dynamically in ExpandNames rather than taking
  # the big ganeti lock
  REQ_BGL = False
4135 7baf741d Guido Trotter
4136 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Resolve a (possibly shortened) node name to its full name.

    @raise errors.OpPrereqError: if the name matches no known node

    """
    expanded = self.cfg.ExpandNodeName(node)
    if expanded is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return expanded
4144 7baf741d Guido Trotter
4145 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation, and pre-validate
    the opcode parameters that can be checked without cluster state
    (mode, disk template, hypervisor/backend parameters, NIC and disk
    specifications).

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster-wide hypervisor when none was requested
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)

    # merge the opcode's hvparams on top of the cluster defaults before
    # syntax-checking them
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)

    # fill and remember the beparams dict (cluster defaults overridden
    # by the opcode); self.be_full is reused in CheckPrereq and
    # BuildHooksEnv
    utils.CheckBEParams(self.op.beparams)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification (resolution also gives us the IP used
    # for the "auto" NIC address and the ip_check below)
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup: normalize each NIC spec dict into an objects.NIC
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        # "auto" means: use the IP the instance name resolves to
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification ("auto"/"generate" are resolved later,
      # in Exec, via cfg.GenerateMAC)
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      bridge = nic.get("bridge", None)
      if bridge is None:
        bridge = self.cfg.GetDefBridge()
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    # exactly one of iallocator/pnode must be given
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may place the instance on any node, so lock all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      # default export path: relative, named after the instance
      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # no source node given: CheckPrereq will search all (locked)
        # nodes for an export matching the relative path
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          # relative paths are anchored at the cluster export directory
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
4301 a8083063 Iustin Pop
4302 538475ca Iustin Pop
  def _RunAllocator(self):
    """Compute the instance's placement via the instance allocator.

    On success, fills in self.op.pnode (and self.op.snode when the
    allocator requires two nodes) from the allocator's answer.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    nic_dicts = [nic.ToDict() for nic in self.nics]
    allocator = IAllocator(self,
                           mode=constants.IALLOCATOR_MODE_ALLOC,
                           name=self.op.instance_name,
                           disk_template=self.op.disk_template,
                           tags=[],
                           os=self.op.os_type,
                           vcpus=self.be_full[constants.BE_VCPUS],
                           mem_size=self.be_full[constants.BE_MEMORY],
                           disks=self.disks,
                           nics=nic_dicts,
                           hypervisor=self.op.hypervisor,
                           )

    allocator.Run(self.op.iallocator)

    if not allocator.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           allocator.info))
    if len(allocator.nodes) != allocator.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(allocator.nodes),
                                  allocator.required_nodes))

    self.op.pnode = allocator.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(allocator.nodes))
    if allocator.required_nodes == 2:
      self.op.snode = allocator.nodes[1]
4337 538475ca Iustin Pop
4338 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    disk_sizes = ",".join(str(disk["size"]) for disk in self.disks)
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": disk_sizes,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      # imports additionally expose where the source export lives
      env.update({
        "INSTANCE_SRC_NODE": self.op.src_node,
        "INSTANCE_SRC_PATH": self.op.src_path,
        "INSTANCE_SRC_IMAGES": self.src_images,
        })

    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=[(nic.ip, nic.bridge, nic.mac) for nic in self.nics],
      ))

    node_list = ([self.cfg.GetMasterNode(), self.op.pnode] +
                 self.secondaries)
    return env, node_list, node_list
4367 a8083063 Iustin Pop
4368 a8083063 Iustin Pop
4369 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Validates the import source (when in import mode), runs the
    allocator if requested, and verifies node placement, free disk
    space, hypervisor parameters, guest OS, bridges and free memory.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_node is None:
        # no source node was given: search all locked nodes for an
        # export matching the relative path (set up in ExpandNames)
        exp_list = self.rpc.call_export_list(
          self.acquired_locks[locking.LEVEL_NODE])
        found = False
        for node in exp_list:
          if not exp_list[node].failed and src_path in exp_list[node].data:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise()
      if not result.data:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      # export_info behaves like a ConfigParser (has_section/get/getint)
      export_info = result.data
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      # collect per-disk dump images; False marks a disk with no dump
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          # NOTE(review): "exp_nic_count >= idx" looks off-by-one — for
          # idx == exp_nic_count the 'nic%d_mac' option should not
          # exist; confirm whether "> idx" was intended
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### allocator run

    if self.op.iallocator is not None:
      # fills in self.op.pnode (and possibly self.op.snode)
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(self.op.snode)
      _CheckNodeOnline(self, self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise()
        info = info.data
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        # NOTE(review): isinstance(vg_free, int) rejects Python 2 longs
        # (and None/strings); confirm large VGs cannot report a long here
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise()
    if not isinstance(result.data, objects.OS):
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
                                 " exist on destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)
4531 49ce1563 Iustin Pop
4532 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4533 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
4534 a8083063 Iustin Pop

4535 a8083063 Iustin Pop
    """
4536 a8083063 Iustin Pop
    instance = self.op.instance_name
4537 a8083063 Iustin Pop
    pnode_name = self.pnode.name
4538 a8083063 Iustin Pop
4539 08db7c5c Iustin Pop
    for nic in self.nics:
4540 08db7c5c Iustin Pop
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4541 08db7c5c Iustin Pop
        nic.mac = self.cfg.GenerateMAC()
4542 a8083063 Iustin Pop
4543 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
4544 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
4545 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
4546 2a6469d5 Alexander Schreiber
    else:
4547 2a6469d5 Alexander Schreiber
      network_port = None
4548 58acb49d Alexander Schreiber
4549 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
4550 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4551 31a853d2 Iustin Pop
4552 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
4553 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
4554 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
4555 2c313123 Manuel Franceschini
    else:
4556 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
4557 2c313123 Manuel Franceschini
4558 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
4559 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
4560 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
4561 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
4562 0f1a06e3 Manuel Franceschini
4563 0f1a06e3 Manuel Franceschini
4564 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
4565 a8083063 Iustin Pop
                                  self.op.disk_template,
4566 a8083063 Iustin Pop
                                  instance, pnode_name,
4567 08db7c5c Iustin Pop
                                  self.secondaries,
4568 08db7c5c Iustin Pop
                                  self.disks,
4569 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
4570 e2a65344 Iustin Pop
                                  self.op.file_driver,
4571 e2a65344 Iustin Pop
                                  0)
4572 a8083063 Iustin Pop
4573 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
4574 a8083063 Iustin Pop
                            primary_node=pnode_name,
4575 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
4576 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
4577 4978db17 Iustin Pop
                            admin_up=False,
4578 58acb49d Alexander Schreiber
                            network_port=network_port,
4579 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
4580 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
4581 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
4582 a8083063 Iustin Pop
                            )
4583 a8083063 Iustin Pop
4584 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
4585 796cab27 Iustin Pop
    try:
4586 796cab27 Iustin Pop
      _CreateDisks(self, iobj)
4587 796cab27 Iustin Pop
    except errors.OpExecError:
4588 796cab27 Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
4589 796cab27 Iustin Pop
      try:
4590 796cab27 Iustin Pop
        _RemoveDisks(self, iobj)
4591 796cab27 Iustin Pop
      finally:
4592 796cab27 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance)
4593 796cab27 Iustin Pop
        raise
4594 a8083063 Iustin Pop
4595 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
4596 a8083063 Iustin Pop
4597 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
4598 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
4599 7baf741d Guido Trotter
    # added the instance to the config
4600 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
4601 e36e96b4 Guido Trotter
    # Unlock all the nodes
4602 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
4603 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
4604 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
4605 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
4606 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
4607 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
4608 9c8971d7 Guido Trotter
    else:
4609 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
4610 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
4611 a8083063 Iustin Pop
4612 a8083063 Iustin Pop
    if self.op.wait_for_sync:
4613 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
4614 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
4615 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
4616 a8083063 Iustin Pop
      time.sleep(15)
4617 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
4618 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
4619 a8083063 Iustin Pop
    else:
4620 a8083063 Iustin Pop
      disk_abort = False
4621 a8083063 Iustin Pop
4622 a8083063 Iustin Pop
    if disk_abort:
4623 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
4624 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
4625 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
4626 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
4627 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
4628 3ecf6786 Iustin Pop
                               " this instance")
4629 a8083063 Iustin Pop
4630 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
4631 a8083063 Iustin Pop
                (instance, pnode_name))
4632 a8083063 Iustin Pop
4633 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
4634 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
4635 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
4636 781de953 Iustin Pop
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
4637 20e01edd Iustin Pop
        msg = result.RemoteFailMsg()
4638 20e01edd Iustin Pop
        if msg:
4639 781de953 Iustin Pop
          raise errors.OpExecError("Could not add os for instance %s"
4640 20e01edd Iustin Pop
                                   " on node %s: %s" %
4641 20e01edd Iustin Pop
                                   (instance, pnode_name, msg))
4642 a8083063 Iustin Pop
4643 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
4644 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
4645 a8083063 Iustin Pop
        src_node = self.op.src_node
4646 09acf207 Guido Trotter
        src_images = self.src_images
4647 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
4648 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
4649 09acf207 Guido Trotter
                                                         src_node, src_images,
4650 6c0af70e Guido Trotter
                                                         cluster_name)
4651 781de953 Iustin Pop
        import_result.Raise()
4652 781de953 Iustin Pop
        for idx, result in enumerate(import_result.data):
4653 09acf207 Guido Trotter
          if not result:
4654 726d7d68 Iustin Pop
            self.LogWarning("Could not import the image %s for instance"
4655 726d7d68 Iustin Pop
                            " %s, disk %d, on node %s" %
4656 726d7d68 Iustin Pop
                            (src_images[idx], instance, idx, pnode_name))
4657 a8083063 Iustin Pop
      else:
4658 a8083063 Iustin Pop
        # also checked in the prereq part
4659 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
4660 3ecf6786 Iustin Pop
                                     % self.op.mode)
4661 a8083063 Iustin Pop
4662 a8083063 Iustin Pop
    if self.op.start:
4663 4978db17 Iustin Pop
      iobj.admin_up = True
4664 4978db17 Iustin Pop
      self.cfg.Update(iobj)
4665 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
4666 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
4667 781de953 Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None)
4668 dd279568 Iustin Pop
      msg = result.RemoteFailMsg()
4669 dd279568 Iustin Pop
      if msg:
4670 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance: %s" % msg)
4671 a8083063 Iustin Pop
4672 a8083063 Iustin Pop
4673 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # make sure the instance is actually running on its primary node
    running = self.rpc.call_instance_list([pnode],
                                          [inst.hypervisor])[pnode]
    running.Raise()
    if inst.name not in running.data:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logging.debug("Connecting to console of %s on %s", inst.name, pnode)

    hyper = hypervisor.GetHypervisor(inst.hypervisor)
    cluster = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    hvparams = cluster.FillHV(inst)
    beparams = cluster.FillBE(inst)
    console_cmd = hyper.GetShellCommandForConsole(inst, hvparams, beparams)

    # build ssh cmdline
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
4724 a8083063 Iustin Pop
4725 a8083063 Iustin Pop
4726 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  # mandatory opcode parameters; remote_node and iallocator are optional
  # and are normalized in CheckArguments below
  _OP_REQP = ["instance_name", "mode", "disks"]
  # no big ganeti lock: the needed locks are declared in ExpandNames
  REQ_BGL = False
4734 efd990e4 Guido Trotter
4735 7e9366f7 Iustin Pop
  def CheckArguments(self):
    """Normalize and validate the opcode arguments.

    Ensures the optional remote_node/iallocator attributes exist (None
    when absent) and that exactly one of them is given when, and only
    when, the secondary node is being changed.

    """
    for attr in ("remote_node", "iallocator"):
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # number of the two mutually-exclusive options left unset
    unset_count = [self.op.remote_node, self.op.iallocator].count(None)
    if self.op.mode == constants.REPLACE_DISK_CHG:
      # changing the secondary: exactly one of the two must be given
      if unset_count == 2:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")
      if unset_count == 0:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
    elif unset_count != 2:
      # not replacing the secondary: neither option may be given
      raise errors.OpPrereqError("The iallocator and new node options can"
                                 " be used only when changing the"
                                 " secondary node")
4756 7e9366f7 Iustin Pop
4757 7e9366f7 Iustin Pop
  def ExpandNames(self):
    """Expand and declare the locks needed by this LU.

    The instance lock is always taken.  The node lock set depends on
    how the new secondary is chosen: all nodes when an iallocator will
    pick it, only the given node when one was named explicitly, or the
    instance's own nodes (recalculated in DeclareLocks) otherwise.

    """
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      # the allocator may choose any node, so lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      # store the expanded (canonical) name back into the opcode
      self.op.remote_node = remote_node
      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      # no explicit target: the instance's own nodes are added later via
      # _LockInstancesNodes (see DeclareLocks)
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4777 efd990e4 Guido Trotter
4778 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Declare LU locks at the given locking level.

    If we're not already locking all nodes in the set we have to
    declare the instance's primary/secondary nodes explicitly.

    """
    if level != locking.LEVEL_NODE:
      return
    if self.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET:
      return
    self._LockInstancesNodes()
4784 a8083063 Iustin Pop
4785 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    Runs the configured iallocator script in relocation mode and, on
    success, stores the selected node in self.op.remote_node.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # FIX: the format string has three placeholders, but previously
      # only two arguments were supplied, turning this error path into
      # a TypeError; the allocator name is the missing first argument
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)
4807 b6e82a65 Iustin Pop
4808 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    # hooks run on the master and the primary; the new secondary is
    # included only when one was given/computed
    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return env, node_list, node_list
4827 a8083063 Iustin Pop
4828 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, is DRBD8-based
    with exactly one secondary, that the replacement target nodes are
    valid and online, and that the requested disk indices exist.  It
    also computes self.tgt_node/self.oth_node (and self.new_node when
    changing the secondary) used by the Exec methods.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # only DRBD8 disks can be replaced with this LU
    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    if self.op.iallocator is not None:
      # lets the allocator pick the new secondary (sets
      # self.op.remote_node on success)
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    # per-mode roles: tgt_node gets the new storage, oth_node is the
    # peer checked for consistency, new_node is the replacement
    # secondary (CHG mode only)
    if self.op.mode == constants.REPLACE_DISK_PRI:
      n1 = self.tgt_node = instance.primary_node
      n2 = self.oth_node = self.sec_node
    elif self.op.mode == constants.REPLACE_DISK_SEC:
      n1 = self.tgt_node = self.sec_node
      n2 = self.oth_node = instance.primary_node
    elif self.op.mode == constants.REPLACE_DISK_CHG:
      n1 = self.new_node = remote_node
      n2 = self.oth_node = instance.primary_node
      self.tgt_node = self.sec_node
    else:
      raise errors.ProgrammerError("Unhandled disk replace mode")

    # both nodes taking part in the replacement must be online
    _CheckNodeOnline(self, n1)
    _CheckNodeOnline(self, n2)

    # an empty disk list means "replace all disks"
    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    # look up each requested index; the result is unused, so this is
    # effectively an existence check for the given disks
    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)
4888 a8083063 Iustin Pop
4889 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking disk/%d on %s" % (idx, node))
        cfg.SetDiskID(dev, node)
        result = self.rpc.call_blockdev_find(node, dev)
        msg = result.RemoteFailMsg()
        if not msg and not result.payload:
          msg = "disk not found"
        if msg:
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node == instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".disk%d_%s" % (idx, suf)
                  for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if not result.RemoteFailMsg() and result.payload:
          # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
      if result.failed or not result.data:
        for new_lv in new_lvs:
          result = self.rpc.call_blockdev_remove(tgt_node, new_lv)
          if result.failed or not result.data:
            # FIX: the device name was never interpolated into the "%s"
            # (no positional argument was passed), so the operator saw a
            # literal "%s"; pre-format the message with the failing LV
            warning("Can't rollback device %s" % new_lv,
                    hint="manually cleanup unused logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))
      # payload[5] is the degraded flag of the found block device status
      # as returned by blockdev_find -- TODO confirm against backend
      if result.payload[5]:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        result = self.rpc.call_blockdev_remove(tgt_node, lv)
        if result.failed or not result.data:
          # best-effort cleanup: removal failure is only warned about
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
5079 a9e0c397 Iustin Pop
5080 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    # total number of user-visible steps, used by the LogStep calls below
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # iv_names maps disk index -> (disk, old child LVs, new logical_id);
    # filled in step 4 and consumed by steps 5 and 6
    iv_names = {}
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node
    # node name -> secondary IP, passed to the drbd net (dis)connect RPCs
    nodes_ip = {
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
      }

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([pri_node, new_node])
    for node in pri_node, new_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      # only the disks selected in the opcode are replaced
      if idx not in self.op.disks:
        continue
      info("checking disk/%d on %s" % (idx, pri_node))
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      # an empty payload with no error also means the device is missing
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                 (idx, pri_node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, pri_node))
      # ldisk=True: check the local disk state, not the network mirror
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      info("adding new local storage on %s for disk/%d" %
           (new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self, new_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step 4: drbd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
                                   instance.name)
    logging.debug("Allocated minors %s" % (minors,))
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
      size = dev.size  # NOTE(review): 'size' appears unused below
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the later activation (the attach below)
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if pri_node == o_node1:
        p_minor = o_minor1
      else:
        p_minor = o_minor2

      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children)
      try:
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
                              _GetInstanceInfoText(instance), False)
      except errors.BlockDeviceError:
        # undo the minor reservation before propagating the failure
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    for idx, dev in enumerate(instance.disks):
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for disk/%d on old node" % idx)
      cfg.SetDiskID(dev, old_node)
      result = self.rpc.call_blockdev_shutdown(old_node, dev)
      if result.failed or not result.data:
        # best-effort: a leftover device on the old node is only a warning
        warning("Failed to shutdown drbd for disk/%d on old node" % idx,
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
                                               instance.disks)[pri_node]

    msg = result.RemoteFailMsg()
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
                                           instance.disks, instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.RemoteFailMsg()
      if msg:
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
                hint="please do a gnt-instance info to see the"
                " status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
                                 (idx, msg))
      # NOTE(review): payload[5] is presumably the is_degraded flag of the
      # blockdev_find result — confirm against the rpc/bdev layer
      if result.payload[5]:
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      info("remove logical volumes for disk/%d" % idx)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        result = self.rpc.call_blockdev_remove(old_node, lv)
        if result.failed or not result.data:
          # again best-effort: stale LVs do not fail the operation
          warning("Can't remove LV on old secondary",
                  hint="Cleanup stale volumes by hand")
  def Exec(self, feedback_fn):
    """Carry out the disk replacement.

    Selects the secondary-change or the disk-only replacement handler
    based on the opcode mode and runs it, temporarily activating the
    disks of an administratively-down instance around the call.

    """
    instance = self.instance

    # A down instance has its disks inactive; bring them up so the
    # replacement handlers can operate on them.
    if not instance.admin_up:
      _StartInstanceDisks(self, instance, True)

    if self.op.mode == constants.REPLACE_DISK_CHG:
      handler = self._ExecD8Secondary
    else:
      handler = self._ExecD8DiskOnly

    result = handler(feedback_fn)

    # Mirror the activation above: shut the disks down again for a
    # down instance.
    if not instance.admin_up:
      _SafeShutdownInstanceDisks(self, instance)

    return result
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and declare the needed locks."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    # the node locks are computed later, once the instance lock is held
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the nodes of the already-locked instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the grow amount is strictly positive, that the
    instance is in the cluster, that its disk template supports growing
    and that every involved node has enough free space in the VG.

    """
    # FIX: a zero or negative amount used to pass the free-space check
    # below (amount > vg_free is false for it) and was then recorded by
    # RecordGrow in Exec, corrupting the disk size in the configuration
    if self.op.amount <= 0:
      raise errors.OpPrereqError("Invalid grow amount %s (must be"
                                 " positive)" % (self.op.amount,))

    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)


    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      if info.failed or not info.data:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.data.get('vg_free', None)
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    Grows the block device on all nodes of the instance, records the
    new size in the configuration and optionally waits for resync.

    """
    instance = self.instance
    disk = self.disk
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Grow request failed to node %s: %s" %
                                 (node, msg))
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand instance names and declare (shared) locks."""
    self.needed_locks = {}
    # this is a read-only LU, so all locks are acquired in shared mode
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # no explicit list: query all instances (names resolved later,
      # in CheckPrereq, from the acquired locks)
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the nodes of the selected instances."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing the device, including the live primary
    and secondary status (unless self.op.static is set, in which case
    no RPC is made and both statuses are None); recurses into children.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = dev_pstatus.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                 (instance.name, msg))
      dev_pstatus = dev_pstatus.payload
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in):
      # for DRBD the secondary is whichever logical_id node is not the
      # primary
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
      msg = dev_sstatus.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                 (instance.name, msg))
      dev_sstatus = dev_sstatus.payload
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data

    Returns a dict mapping instance name to a dict of configuration,
    runtime state and per-disk status for each wanted instance.
    """
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        # query the live state on the primary node
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise()
        remote_info = remote_info.data
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        # hv/be params: both the instance-level overrides and the
        # cluster-filled effective values
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
5532 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
5533 a8083063 Iustin Pop
  """Modifies an instances's parameters.
5534 a8083063 Iustin Pop

5535 a8083063 Iustin Pop
  """
5536 a8083063 Iustin Pop
  HPATH = "instance-modify"
5537 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5538 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
5539 1a5c7281 Guido Trotter
  REQ_BGL = False
5540 1a5c7281 Guido Trotter
5541 24991749 Iustin Pop
  def CheckArguments(self):
5542 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
5543 24991749 Iustin Pop
      self.op.nics = []
5544 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
5545 24991749 Iustin Pop
      self.op.disks = []
5546 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
5547 24991749 Iustin Pop
      self.op.beparams = {}
5548 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
5549 24991749 Iustin Pop
      self.op.hvparams = {}
5550 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
5551 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
5552 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
5553 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
5554 24991749 Iustin Pop
5555 d4b72030 Guido Trotter
    utils.CheckBEParams(self.op.beparams)
5556 d4b72030 Guido Trotter
5557 24991749 Iustin Pop
    # Disk validation
5558 24991749 Iustin Pop
    disk_addremove = 0
5559 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5560 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5561 24991749 Iustin Pop
        disk_addremove += 1
5562 24991749 Iustin Pop
        continue
5563 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
5564 24991749 Iustin Pop
        disk_addremove += 1
5565 24991749 Iustin Pop
      else:
5566 24991749 Iustin Pop
        if not isinstance(disk_op, int):
5567 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
5568 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
5569 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5570 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
5571 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5572 24991749 Iustin Pop
        size = disk_dict.get('size', None)
5573 24991749 Iustin Pop
        if size is None:
5574 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
5575 24991749 Iustin Pop
        try:
5576 24991749 Iustin Pop
          size = int(size)
5577 24991749 Iustin Pop
        except ValueError, err:
5578 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5579 24991749 Iustin Pop
                                     str(err))
5580 24991749 Iustin Pop
        disk_dict['size'] = size
5581 24991749 Iustin Pop
      else:
5582 24991749 Iustin Pop
        # modification of disk
5583 24991749 Iustin Pop
        if 'size' in disk_dict:
5584 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
5585 24991749 Iustin Pop
                                     " grow-disk")
5586 24991749 Iustin Pop
5587 24991749 Iustin Pop
    if disk_addremove > 1:
5588 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
5589 24991749 Iustin Pop
                                 " supported at a time")
5590 24991749 Iustin Pop
5591 24991749 Iustin Pop
    # NIC validation
5592 24991749 Iustin Pop
    nic_addremove = 0
5593 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5594 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5595 24991749 Iustin Pop
        nic_addremove += 1
5596 24991749 Iustin Pop
        continue
5597 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
5598 24991749 Iustin Pop
        nic_addremove += 1
5599 24991749 Iustin Pop
      else:
5600 24991749 Iustin Pop
        if not isinstance(nic_op, int):
5601 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
5602 24991749 Iustin Pop
5603 24991749 Iustin Pop
      # nic_dict should be a dict
5604 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
5605 24991749 Iustin Pop
      if nic_ip is not None:
5606 24991749 Iustin Pop
        if nic_ip.lower() == "none":
5607 24991749 Iustin Pop
          nic_dict['ip'] = None
5608 24991749 Iustin Pop
        else:
5609 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
5610 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
5611 24991749 Iustin Pop
      # we can only check None bridges and assign the default one
5612 24991749 Iustin Pop
      nic_bridge = nic_dict.get('bridge', None)
5613 24991749 Iustin Pop
      if nic_bridge is None:
5614 24991749 Iustin Pop
        nic_dict['bridge'] = self.cfg.GetDefBridge()
5615 24991749 Iustin Pop
      # but we can validate MACs
5616 24991749 Iustin Pop
      nic_mac = nic_dict.get('mac', None)
5617 24991749 Iustin Pop
      if nic_mac is not None:
5618 24991749 Iustin Pop
        if self.cfg.IsMacInUse(nic_mac):
5619 24991749 Iustin Pop
          raise errors.OpPrereqError("MAC address %s already in use"
5620 24991749 Iustin Pop
                                     " in cluster" % nic_mac)
5621 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5622 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
5623 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
5624 24991749 Iustin Pop
    if nic_addremove > 1:
5625 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
5626 24991749 Iustin Pop
                                 " supported at a time")
5627 24991749 Iustin Pop
5628 1a5c7281 Guido Trotter
  def ExpandNames(self):
5629 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
5630 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
5631 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5632 74409b12 Iustin Pop
5633 74409b12 Iustin Pop
  def DeclareLocks(self, level):
5634 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
5635 74409b12 Iustin Pop
      self._LockInstancesNodes()
5636 a8083063 Iustin Pop
5637 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5638 a8083063 Iustin Pop
    """Build hooks env.
5639 a8083063 Iustin Pop

5640 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
5641 a8083063 Iustin Pop

5642 a8083063 Iustin Pop
    """
5643 396e1b78 Michael Hanselmann
    args = dict()
5644 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
5645 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
5646 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
5647 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
5648 24991749 Iustin Pop
    # FIXME: readd disk/nic changes
5649 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
5650 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5651 a8083063 Iustin Pop
    return env, nl, nl
5652 a8083063 Iustin Pop
5653 a8083063 Iustin Pop
  def CheckPrereq(self):
5654 a8083063 Iustin Pop
    """Check prerequisites.
5655 a8083063 Iustin Pop

5656 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
5657 a8083063 Iustin Pop

5658 a8083063 Iustin Pop
    """
5659 24991749 Iustin Pop
    force = self.force = self.op.force
5660 a8083063 Iustin Pop
5661 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
5662 31a853d2 Iustin Pop
5663 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5664 1a5c7281 Guido Trotter
    assert self.instance is not None, \
5665 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5666 6b12959c Iustin Pop
    pnode = instance.primary_node
5667 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
5668 74409b12 Iustin Pop
5669 338e51e8 Iustin Pop
    # hvparams processing
5670 74409b12 Iustin Pop
    if self.op.hvparams:
5671 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
5672 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
5673 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
5674 74409b12 Iustin Pop
          try:
5675 74409b12 Iustin Pop
            del i_hvdict[key]
5676 74409b12 Iustin Pop
          except KeyError:
5677 74409b12 Iustin Pop
            pass
5678 8edcd611 Guido Trotter
        elif val == constants.VALUE_NONE:
5679 8edcd611 Guido Trotter
          i_hvdict[key] = None
5680 74409b12 Iustin Pop
        else:
5681 74409b12 Iustin Pop
          i_hvdict[key] = val
5682 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
5683 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
5684 74409b12 Iustin Pop
                                i_hvdict)
5685 74409b12 Iustin Pop
      # local check
5686 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
5687 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
5688 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
5689 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
5690 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
5691 338e51e8 Iustin Pop
    else:
5692 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
5693 338e51e8 Iustin Pop
5694 338e51e8 Iustin Pop
    # beparams processing
5695 338e51e8 Iustin Pop
    if self.op.beparams:
5696 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
5697 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
5698 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
5699 338e51e8 Iustin Pop
          try:
5700 338e51e8 Iustin Pop
            del i_bedict[key]
5701 338e51e8 Iustin Pop
          except KeyError:
5702 338e51e8 Iustin Pop
            pass
5703 338e51e8 Iustin Pop
        else:
5704 338e51e8 Iustin Pop
          i_bedict[key] = val
5705 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
5706 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
5707 338e51e8 Iustin Pop
                                i_bedict)
5708 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
5709 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
5710 338e51e8 Iustin Pop
    else:
5711 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
5712 74409b12 Iustin Pop
5713 cfefe007 Guido Trotter
    self.warn = []
5714 647a5d80 Iustin Pop
5715 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
5716 647a5d80 Iustin Pop
      mem_check_list = [pnode]
5717 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
5718 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
5719 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
5720 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
5721 72737a7f Iustin Pop
                                                  instance.hypervisor)
5722 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
5723 72737a7f Iustin Pop
                                         instance.hypervisor)
5724 781de953 Iustin Pop
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
5725 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
5726 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
5727 cfefe007 Guido Trotter
      else:
5728 781de953 Iustin Pop
        if not instance_info.failed and instance_info.data:
5729 781de953 Iustin Pop
          current_mem = instance_info.data['memory']
5730 cfefe007 Guido Trotter
        else:
5731 cfefe007 Guido Trotter
          # Assume instance not running
5732 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
5733 cfefe007 Guido Trotter
          # and we have no other way to check)
5734 cfefe007 Guido Trotter
          current_mem = 0
5735 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
5736 781de953 Iustin Pop
                    nodeinfo[pnode].data['memory_free'])
5737 cfefe007 Guido Trotter
        if miss_mem > 0:
5738 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
5739 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
5740 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
5741 cfefe007 Guido Trotter
5742 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
5743 ea33068f Iustin Pop
        for node, nres in nodeinfo.iteritems():
5744 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
5745 ea33068f Iustin Pop
            continue
5746 781de953 Iustin Pop
          if nres.failed or not isinstance(nres.data, dict):
5747 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
5748 781de953 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
5749 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
5750 647a5d80 Iustin Pop
                             " secondary node %s" % node)
5751 5bc84f33 Alexander Schreiber
5752 24991749 Iustin Pop
    # NIC processing
5753 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5754 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5755 24991749 Iustin Pop
        if not instance.nics:
5756 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
5757 24991749 Iustin Pop
        continue
5758 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
5759 24991749 Iustin Pop
        # an existing nic
5760 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
5761 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
5762 24991749 Iustin Pop
                                     " are 0 to %d" %
5763 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
5764 24991749 Iustin Pop
      nic_bridge = nic_dict.get('bridge', None)
5765 24991749 Iustin Pop
      if nic_bridge is not None:
5766 24991749 Iustin Pop
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
5767 24991749 Iustin Pop
          msg = ("Bridge '%s' doesn't exist on one of"
5768 24991749 Iustin Pop
                 " the instance nodes" % nic_bridge)
5769 24991749 Iustin Pop
          if self.force:
5770 24991749 Iustin Pop
            self.warn.append(msg)
5771 24991749 Iustin Pop
          else:
5772 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
5773 24991749 Iustin Pop
5774 24991749 Iustin Pop
    # DISK processing
5775 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
5776 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
5777 24991749 Iustin Pop
                                 " diskless instances")
5778 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5779 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5780 24991749 Iustin Pop
        if len(instance.disks) == 1:
5781 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
5782 24991749 Iustin Pop
                                     " an instance")
5783 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
5784 24991749 Iustin Pop
        ins_l = ins_l[pnode]
5785 4cfb9426 Iustin Pop
        if ins_l.failed or not isinstance(ins_l.data, list):
5786 24991749 Iustin Pop
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
5787 4cfb9426 Iustin Pop
        if instance.name in ins_l.data:
5788 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
5789 24991749 Iustin Pop
                                     " disks.")
5790 24991749 Iustin Pop
5791 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
5792 24991749 Iustin Pop
          len(instance.nics) >= constants.MAX_DISKS):
5793 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
5794 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
5795 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
5796 24991749 Iustin Pop
        # an existing disk
5797 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
5798 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
5799 24991749 Iustin Pop
                                     " are 0 to %d" %
5800 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
5801 24991749 Iustin Pop
5802 a8083063 Iustin Pop
    return
5803 a8083063 Iustin Pop
5804 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5805 a8083063 Iustin Pop
    """Modifies an instance.
5806 a8083063 Iustin Pop

5807 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
5808 24991749 Iustin Pop

5809 a8083063 Iustin Pop
    """
5810 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
5811 cfefe007 Guido Trotter
    # feedback_fn there.
5812 cfefe007 Guido Trotter
    for warn in self.warn:
5813 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
5814 cfefe007 Guido Trotter
5815 a8083063 Iustin Pop
    result = []
5816 a8083063 Iustin Pop
    instance = self.instance
5817 24991749 Iustin Pop
    # disk changes
5818 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5819 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5820 24991749 Iustin Pop
        # remove the last disk
5821 24991749 Iustin Pop
        device = instance.disks.pop()
5822 24991749 Iustin Pop
        device_idx = len(instance.disks)
5823 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
5824 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
5825 4cfb9426 Iustin Pop
          rpc_result = self.rpc.call_blockdev_remove(node, disk)
5826 4cfb9426 Iustin Pop
          if rpc_result.failed or not rpc_result.data:
5827 24991749 Iustin Pop
            self.proc.LogWarning("Could not remove disk/%d on node %s,"
5828 24991749 Iustin Pop
                                 " continuing anyway", device_idx, node)
5829 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
5830 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
5831 24991749 Iustin Pop
        # add a new disk
5832 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
5833 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
5834 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
5835 24991749 Iustin Pop
        else:
5836 24991749 Iustin Pop
          file_driver = file_path = None
5837 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
5838 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
5839 24991749 Iustin Pop
                                         instance.disk_template,
5840 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
5841 24991749 Iustin Pop
                                         instance.secondary_nodes,
5842 24991749 Iustin Pop
                                         [disk_dict],
5843 24991749 Iustin Pop
                                         file_path,
5844 24991749 Iustin Pop
                                         file_driver,
5845 24991749 Iustin Pop
                                         disk_idx_base)[0]
5846 24991749 Iustin Pop
        instance.disks.append(new_disk)
5847 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
5848 24991749 Iustin Pop
5849 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
5850 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
5851 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
5852 24991749 Iustin Pop
        #HARDCODE
5853 428958aa Iustin Pop
        for node in instance.all_nodes:
5854 428958aa Iustin Pop
          f_create = node == instance.primary_node
5855 796cab27 Iustin Pop
          try:
5856 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
5857 428958aa Iustin Pop
                            f_create, info, f_create)
5858 1492cca7 Iustin Pop
          except errors.OpExecError, err:
5859 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
5860 428958aa Iustin Pop
                            " node %s: %s",
5861 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
5862 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
5863 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
5864 24991749 Iustin Pop
      else:
5865 24991749 Iustin Pop
        # change a given disk
5866 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
5867 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
5868 24991749 Iustin Pop
    # NIC changes
5869 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5870 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5871 24991749 Iustin Pop
        # remove the last nic
5872 24991749 Iustin Pop
        del instance.nics[-1]
5873 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
5874 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
5875 24991749 Iustin Pop
        # add a new nic
5876 24991749 Iustin Pop
        if 'mac' not in nic_dict:
5877 24991749 Iustin Pop
          mac = constants.VALUE_GENERATE
5878 24991749 Iustin Pop
        else:
5879 24991749 Iustin Pop
          mac = nic_dict['mac']
5880 24991749 Iustin Pop
        if mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5881 24991749 Iustin Pop
          mac = self.cfg.GenerateMAC()
5882 24991749 Iustin Pop
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
5883 24991749 Iustin Pop
                              bridge=nic_dict.get('bridge', None))
5884 24991749 Iustin Pop
        instance.nics.append(new_nic)
5885 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
5886 24991749 Iustin Pop
                       "add:mac=%s,ip=%s,bridge=%s" %
5887 24991749 Iustin Pop
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
5888 24991749 Iustin Pop
      else:
5889 24991749 Iustin Pop
        # change a given nic
5890 24991749 Iustin Pop
        for key in 'mac', 'ip', 'bridge':
5891 24991749 Iustin Pop
          if key in nic_dict:
5892 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
5893 24991749 Iustin Pop
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
5894 24991749 Iustin Pop
5895 24991749 Iustin Pop
    # hvparams changes
5896 74409b12 Iustin Pop
    if self.op.hvparams:
5897 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
5898 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
5899 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
5900 24991749 Iustin Pop
5901 24991749 Iustin Pop
    # beparams changes
5902 338e51e8 Iustin Pop
    if self.op.beparams:
5903 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
5904 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
5905 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
5906 a8083063 Iustin Pop
5907 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
5908 a8083063 Iustin Pop
5909 a8083063 Iustin Pop
    return result
5910 a8083063 Iustin Pop
5911 a8083063 Iustin Pop
5912 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the existing exports.

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    """Compute the lock set: the requested nodes, or all of them.

    """
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node, nres in rpcresult.items():
      # a failed RPC is reported as False instead of an export list
      if nres.failed:
        result[node] = False
      else:
        result[node] = nres.data

    return result
5952 a8083063 Iustin Pop
5953 a8083063 Iustin Pop
5954 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
5955 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
5956 a8083063 Iustin Pop

5957 a8083063 Iustin Pop
  """
5958 a8083063 Iustin Pop
  HPATH = "instance-export"
5959 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5960 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
5961 6657590e Guido Trotter
  REQ_BGL = False
5962 6657590e Guido Trotter
5963 6657590e Guido Trotter
  def ExpandNames(self):
    """Expand and lock the needed names.

    """
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true: for now we have to lock all nodes, because we don't
    # know where the previous export might be; this LU searches for it
    # and removes it from its current node.  Possible future fixes:
    #  - a tasklet that share-locks all nodes to search, then creates
    #    the new export, then removes the old one
    #  - dropping the removal step altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5974 6657590e Guido Trotter
5975 6657590e Guido Trotter
  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # ExpandNames already locked every node, so there is nothing to add.
5978 a8083063 Iustin Pop
5979 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build the hooks environment for instance export.

    The hooks run on the master node, the instance's primary node and
    the export target node.

    """
    hook_env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    # instance-derived variables are merged in last, matching the
    # original precedence on key collisions
    hook_env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node,
                 self.op.target_node]
    return hook_env, node_list, node_list
5993 a8083063 Iustin Pop
5994 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that the instance and the target node exist, that the
    involved nodes are online, and that the instance has no file-based
    disks (those cannot be exported).

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    expanded_node = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(expanded_node)
    if self.dst_node is None:
      # a bad node name, as opposed to a node we simply did not lock
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)

    # exporting file-based disks is not supported
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")
6019 b6023d6c Manuel Franceschini
6020 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Snapshots each of the instance's disks on the primary node,
    transfers the snapshots to the destination node, finalizes the
    export there, and finally prunes older exports of the same
    instance from the other cluster nodes.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    try:
      for disk in instance.disks:
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot block device %s on node %s",
                          disk.logical_id[1], src_node)
          # keep positional alignment with instance.disks by appending a
          # placeholder for the failed snapshot
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if it
      # was running (admin_up) and we shut it down ourselves
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance, None)
        msg = result.RemoteFailMsg()
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      # dev is False for disks whose snapshot failed above; skip those
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s", dev.logical_id[1], src_node,
                          dst_node.name)
        # remove the snapshot regardless of whether the export succeeded
        result = self.rpc.call_blockdev_remove(src_node, dev)
        if result.failed or not result.data:
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s", dev.logical_id[1], src_node)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          # FIX: check the RpcResult payload instead of the object's truth
          # value -- an RpcResult instance is always true, so the previous
          # "if not self.rpc.call_export_remove(...)" warning never fired;
          # use the same failed/data check as everywhere else in this file
          result = self.rpc.call_export_remove(node, instance.name)
          if result.failed or not result.data:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)
6103 5c947f38 Iustin Pop
6104 5c947f38 Iustin Pop
6105 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      node_result = exportlist[node]
      if node_result.failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name not in node_result.data:
        continue
      found = True
      result = self.rpc.call_export_remove(node, instance_name)
      if result.failed or not result.data:
        logging.error("Could not remove export for instance %s"
                      " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
6154 9ac99fda Guido Trotter
6155 9ac99fda Guido Trotter
6156 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    # only node and instance targets need their name expanded and locked;
    # cluster-level tags need neither
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif self.op.kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded

  def CheckPrereq(self):
    """Check prerequisites.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
6193 5c947f38 Iustin Pop
6194 5c947f38 Iustin Pop
6195 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # copy the tag set into a fresh list so callers can't mutate the target
    tags = self.target.GetTags()
    return list(tags)
6207 5c947f38 Iustin Pop
6208 5c947f38 Iustin Pop
6209 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6210 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6211 73415719 Iustin Pop

6212 73415719 Iustin Pop
  """
6213 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6214 8646adce Guido Trotter
  REQ_BGL = False
6215 8646adce Guido Trotter
6216 8646adce Guido Trotter
  def ExpandNames(self):
6217 8646adce Guido Trotter
    self.needed_locks = {}
6218 73415719 Iustin Pop
6219 73415719 Iustin Pop
  def CheckPrereq(self):
6220 73415719 Iustin Pop
    """Check prerequisites.
6221 73415719 Iustin Pop

6222 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6223 73415719 Iustin Pop

6224 73415719 Iustin Pop
    """
6225 73415719 Iustin Pop
    try:
6226 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6227 73415719 Iustin Pop
    except re.error, err:
6228 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6229 73415719 Iustin Pop
                                 (self.op.pattern, err))
6230 73415719 Iustin Pop
6231 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6232 73415719 Iustin Pop
    """Returns the tag list.
6233 73415719 Iustin Pop

6234 73415719 Iustin Pop
    """
6235 73415719 Iustin Pop
    cfg = self.cfg
6236 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6237 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6238 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6239 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6240 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6241 73415719 Iustin Pop
    results = []
6242 73415719 Iustin Pop
    for path, target in tgts:
6243 73415719 Iustin Pop
      for tag in target.GetTags():
6244 73415719 Iustin Pop
        if self.re.search(tag):
6245 73415719 Iustin Pop
          results.append((path, tag))
6246 73415719 Iustin Pop
    return results
6247 73415719 Iustin Pop
6248 73415719 Iustin Pop
6249 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6250 5c947f38 Iustin Pop
  """Sets a tag on a given object.
6251 5c947f38 Iustin Pop

6252 5c947f38 Iustin Pop
  """
6253 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6254 8646adce Guido Trotter
  REQ_BGL = False
6255 5c947f38 Iustin Pop
6256 5c947f38 Iustin Pop
  def CheckPrereq(self):
6257 5c947f38 Iustin Pop
    """Check prerequisites.
6258 5c947f38 Iustin Pop

6259 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
6260 5c947f38 Iustin Pop

6261 5c947f38 Iustin Pop
    """
6262 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6263 f27302fa Iustin Pop
    for tag in self.op.tags:
6264 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6265 5c947f38 Iustin Pop
6266 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6267 5c947f38 Iustin Pop
    """Sets the tag.
6268 5c947f38 Iustin Pop

6269 5c947f38 Iustin Pop
    """
6270 5c947f38 Iustin Pop
    try:
6271 f27302fa Iustin Pop
      for tag in self.op.tags:
6272 f27302fa Iustin Pop
        self.target.AddTag(tag)
6273 5c947f38 Iustin Pop
    except errors.TagError, err:
6274 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6275 5c947f38 Iustin Pop
    try:
6276 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6277 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6278 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6279 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6280 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6281 5c947f38 Iustin Pop
6282 5c947f38 Iustin Pop
6283 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    # every tag to delete must currently exist on the target
    missing = del_tags - cur_tags
    if missing:
      diff_names = sorted(["'%s'" % tag for tag in missing])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for del_tag in self.op.tags:
      self.target.RemoveTag(del_tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
6320 06009e27 Iustin Pop
6321 0eed6e61 Guido Trotter
6322 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # _GetWantedNodes can be used here, but is not always appropriate to use
    # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
    # more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    # sleep on the master first, then fan out to the requested nodes
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        node_result.Raise()
        if not node_result.data:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result.data))
6367 d61df03e Iustin Pop
6368 d61df03e Iustin Pop
6369 d1c2dd75 Iustin Pop
class IAllocator(object):
6370 d1c2dd75 Iustin Pop
  """IAllocator framework.
6371 d61df03e Iustin Pop

6372 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
6373 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
6374 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
6375 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
6376 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
6377 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
6378 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
6379 d1c2dd75 Iustin Pop
      easy usage
6380 d61df03e Iustin Pop

6381 d61df03e Iustin Pop
  """
6382 29859cb7 Iustin Pop
  _ALLO_KEYS = [
6383 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
6384 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
6385 d1c2dd75 Iustin Pop
    ]
6386 29859cb7 Iustin Pop
  _RELO_KEYS = [
6387 29859cb7 Iustin Pop
    "relocate_from",
6388 29859cb7 Iustin Pop
    ]
6389 d1c2dd75 Iustin Pop
6390 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
    self.lu = lu
    # buffer attributes: textual and structured input/output
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # pre-initialize all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # result fields
    self.success = self.info = self.nodes = None
    # pick the key set matching the requested mode
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # every keyword argument must belong to the mode's key set...
    for key, value in kwargs.items():
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, value)
    # ...and every key in the set must have been supplied
    missing = [key for key in keyset if key not in kwargs]
    if missing:
      raise errors.ProgrammerError("Missing input parameter '%s' to"
                                   " IAllocator" % missing[0])
    self._BuildInputData()
6422 d1c2dd75 Iustin Pop
6423 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    Fills self.in_data with a dict holding cluster-, node- and
    instance-level information gathered from the config and via RPC
    (node_info / all_instances_info) to the cluster nodes.  Raises
    OpExecError when an online node returns missing or malformed data.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": 1,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    # pair every instance with its filled-in backend parameters
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    # for allocation the caller supplies the hypervisor; for relocation
    # we take it from the instance being moved
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "master_candidate": ninfo.master_candidate,
        }

      # dynamic (RPC-based) values only exist for online nodes; offline
      # nodes keep just the static entries above
      if not ninfo.offline:
        nresult.Raise()
        if not isinstance(nresult.data, dict):
          raise errors.OpExecError("Can't get data for node %s" % nname)
        remote_info = nresult.data
        # validate and normalize the numeric attributes in place
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          try:
            remote_info[attr] = int(remote_info[attr])
          except ValueError, err:
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" % (nname, attr, err))
        # compute memory used by primary instances
        # NOTE: the loop variable 'iinfo' intentionally shadows the
        # instance list built above; the list is no longer needed by name
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            # instances configured here but not reported by the node are
            # counted as using no memory
            if iinfo.name not in node_iinfo[nname].data:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
            # reserve the difference between configured and actually used
            # memory, so the allocator sees the worst-case free memory
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data
6534 d61df03e Iustin Pop
6535 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _AllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    # network-mirrored templates need a secondary node as well
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1

    self.in_data["request"] = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
6567 298fe380 Iustin Pop
6568 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _IAllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # only mirrored instances can be relocated, and they must have
    # exactly one secondary node to move away from
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    self.in_data["request"] = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
6601 d61df03e Iustin Pop
6602 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
    """Assemble and serialize the complete allocator input.

    Gathers the cluster-wide data, adds the mode-specific request
    (new allocation or relocation) and stores the serialized text
    in self.in_text.

    """
    self._ComputeClusterData()

    # pick the request builder matching the allocator mode
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      add_request = self._AddNewInstance
    else:
      add_request = self._AddRelocateInstance
    add_request()

    self.in_text = serializer.Dump(self.in_data)
6614 d61df03e Iustin Pop
6615 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and process the results.

    The allocator is executed on the master node via RPC; its stdout
    is stored in self.out_text and, if requested, parsed/validated.

    @param name: the name of the allocator script to run
    @param validate: whether to validate the allocator's output
    @param call_fn: the function to use to run the allocator; defaults
        to the iallocator runner RPC call

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    # the runner returns a (retcode, stdout, stderr, fail_reason) tuple
    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()
6639 298fe380 Iustin Pop
6640 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    The output text (self.out_text) is deserialized and checked for
    the mandatory keys ("success", "info", "nodes"), each of which is
    also set as an attribute on this object; "nodes" must be a list.

    @raise errors.OpExecError: if the output cannot be parsed or is
        missing required keys

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    # the top-level result must be a dictionary
    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # each mandatory key is both checked and copied to an attribute
    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
6665 538475ca Iustin Pop
6666 538475ca Iustin Pop
6667 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests, either feeding a constructed
  request back to the caller (direction "in") or actually executing
  the named allocator on it (direction "out").

  """
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation mode needs the full instance specification
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      # the (new) instance name must not already exist in the cluster
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      # every NIC must be a dict with at least mac, ip and bridge
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      # every disk must be a dict with an integer size and a r/w mode
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      # default the hypervisor to the cluster-wide one if unset
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # relocation mode only needs an existing instance name
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      # store the expanded name and the current secondaries for Exec
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    # an allocator name is only required when we actually run one
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    Builds an IAllocator request from the opcode parameters and either
    returns the generated input text (direction "in") or runs the named
    allocator without validation and returns its raw output
    (direction "out").

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      # validate=False: the test LU returns the raw allocator output
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result