#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import time
import re
import platform
import logging
import copy

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict.fromkeys(locking.LEVELS, 0)
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo
    self.LogStep = processor.LogStep
    # support for dry-run
    self.dry_run_result = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged, but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


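# Illustrative sketch (not part of the original module): a minimal LU that
# follows the contract described in LogicalUnit above, using the
# _ExpandAndLockInstance/_LockInstancesNodes helpers and the two-phase lock
# declaration (ExpandNames + DeclareLocks). The LU name, opcode field and
# behaviour are hypothetical; the real LUs live further down in this file.
class _LUExampleQueryInstance(NoHooksLU):
  """Example-only LU showing the ExpandNames/CheckPrereq/Exec contract."""
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # Expand the instance name and declare the instance-level lock.
    self._ExpandAndLockInstance()
    # Node locks are computed later, once the instance lock is held.
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    # Idempotent checks only; signal problems via errors.OpPrereqError.
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

  def Exec(self, feedback_fn):
    # The actual work; errors.OpExecError is the expected failure mode here.
    return self.instance.primary_node

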
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpPrereqError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


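# Illustrative sketch (not part of the original module): how a query-style LU
# would typically call _CheckOutputFields above. The field names and the
# "output_fields" opcode attribute are assumptions for the example.
def _ExampleCheckOutputFieldsUsage(op):
  """Example-only helper showing a typical _CheckOutputFields call."""
  _CheckOutputFields(static=utils.FieldSet("name", "pinst_cnt"),
                     dynamic=utils.FieldSet("mfree", "ctotal"),
                     selected=op.output_fields)

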
def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks,
                          bep, hvp, hypervisor_name):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env

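# Illustrative sketch (not part of the original module): a direct call of
# _BuildInstanceHookEnv with made-up values, showing the shape of its
# arguments. The instance/node names, addresses and parameter dicts below are
# invented; in practice _BuildInstanceHookEnvByObject (further down) builds
# these arguments from a configuration object. The result maps to INSTANCE_*
# variables such as INSTANCE_NIC0_IP, INSTANCE_DISK0_SIZE, INSTANCE_BE_*, etc.
def _ExampleBuildInstanceHookEnv():
  """Example-only call of _BuildInstanceHookEnv with made-up values."""
  return _BuildInstanceHookEnv("instance1.example.com", "node1.example.com",
                               ["node2.example.com"], "debian-etch", True,
                               128, 1,
                               [("198.51.100.10", "aa:00:00:00:00:01",
                                 constants.NIC_MODE_BRIDGED, "xen-br0")],
                               "drbd", [(1024, "rw")],
                               {"auto_balance": True}, {}, "xen-pvm")

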
def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  for nic in nics:
    ip = nic.ip
    mac = nic.mac
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
    mode = filled_params[constants.NIC_MODE]
    link = filled_params[constants.NIC_LINK]
    hooks_nics.append((ip, mac, mode, link))
  return hooks_nics


def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _NICListToTuple(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
    'bep': bep,
    'hvp': hvp,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


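# Illustrative sketch (not part of the original module): the typical way an
# instance LU's BuildHooksEnv combines the helpers above. The "lu.instance"
# attribute and the extra "FORCE" override key are assumptions for the
# example; the return value follows the (env, pre-nodes, post-nodes) contract
# of LogicalUnit.BuildHooksEnv.
def _ExampleInstanceBuildHooksEnv(lu):
  """Example-only sketch of a BuildHooksEnv body for an instance LU."""
  env = _BuildInstanceHookEnvByObject(lu, lu.instance, override={"FORCE": True})
  nl = [lu.cfg.GetMasterNode(), lu.instance.primary_node]
  return env, nl, nl

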
606 ec0292f1 Iustin Pop
def _AdjustCandidatePool(lu):
607 ec0292f1 Iustin Pop
  """Adjust the candidate pool after node operations.
608 ec0292f1 Iustin Pop

609 ec0292f1 Iustin Pop
  """
610 ec0292f1 Iustin Pop
  mod_list = lu.cfg.MaintainCandidatePool()
611 ec0292f1 Iustin Pop
  if mod_list:
612 ec0292f1 Iustin Pop
    lu.LogInfo("Promoted nodes to master candidate role: %s",
613 ee513a66 Iustin Pop
               ", ".join(node.name for node in mod_list))
614 ec0292f1 Iustin Pop
    for name in mod_list:
615 ec0292f1 Iustin Pop
      lu.context.ReaddNode(name)
616 ec0292f1 Iustin Pop
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
617 ec0292f1 Iustin Pop
  if mc_now > mc_max:
618 ec0292f1 Iustin Pop
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
619 ec0292f1 Iustin Pop
               (mc_now, mc_max))
620 ec0292f1 Iustin Pop
621 ec0292f1 Iustin Pop
622 b165e77e Guido Trotter
def _CheckNicsBridgesExist(lu, target_nics, target_node,
623 b165e77e Guido Trotter
                               profile=constants.PP_DEFAULT):
624 b165e77e Guido Trotter
  """Check that the brigdes needed by a list of nics exist.
625 b165e77e Guido Trotter

626 b165e77e Guido Trotter
  """
627 b165e77e Guido Trotter
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
628 b165e77e Guido Trotter
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
629 b165e77e Guido Trotter
                for nic in target_nics]
630 b165e77e Guido Trotter
  brlist = [params[constants.NIC_LINK] for params in paramslist
631 b165e77e Guido Trotter
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
632 b165e77e Guido Trotter
  if brlist:
633 b165e77e Guido Trotter
    result = lu.rpc.call_bridges_exist(target_node, brlist)
634 4c4e4e1e Iustin Pop
    result.Raise("Error checking bridges on destination node '%s'" %
635 4c4e4e1e Iustin Pop
                 target_node, prereq=True)
636 b165e77e Guido Trotter
637 b165e77e Guido Trotter
638 b165e77e Guido Trotter
def _CheckInstanceBridgesExist(lu, instance, node=None):
639 bf6929a2 Alexander Schreiber
  """Check that the brigdes needed by an instance exist.
640 bf6929a2 Alexander Schreiber

641 bf6929a2 Alexander Schreiber
  """
642 b165e77e Guido Trotter
  if node is None:
643 29921401 Iustin Pop
    node = instance.primary_node
644 b165e77e Guido Trotter
  _CheckNicsBridgesExist(lu, instance.nics, node)
645 bf6929a2 Alexander Schreiber
646 bf6929a2 Alexander Schreiber
647 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
648 a8083063 Iustin Pop
  """Logical unit for destroying the cluster.
649 a8083063 Iustin Pop

650 a8083063 Iustin Pop
  """
651 a8083063 Iustin Pop
  _OP_REQP = []
652 a8083063 Iustin Pop
653 a8083063 Iustin Pop
  def CheckPrereq(self):
654 a8083063 Iustin Pop
    """Check prerequisites.
655 a8083063 Iustin Pop

656 a8083063 Iustin Pop
    This checks whether the cluster is empty.
657 a8083063 Iustin Pop

658 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
659 a8083063 Iustin Pop

660 a8083063 Iustin Pop
    """
661 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
662 a8083063 Iustin Pop
663 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
664 db915bd1 Michael Hanselmann
    if len(nodelist) != 1 or nodelist[0] != master:
665 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d node(s) in"
666 3ecf6786 Iustin Pop
                                 " this cluster." % (len(nodelist) - 1))
667 db915bd1 Michael Hanselmann
    instancelist = self.cfg.GetInstanceList()
668 db915bd1 Michael Hanselmann
    if instancelist:
669 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d instance(s) in"
670 3ecf6786 Iustin Pop
                                 " this cluster." % len(instancelist))
671 a8083063 Iustin Pop
672 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
673 a8083063 Iustin Pop
    """Destroys the cluster.
674 a8083063 Iustin Pop

675 a8083063 Iustin Pop
    """
676 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
677 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
678 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
679 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
680 70d9e3d8 Iustin Pop
    utils.CreateBackup(priv_key)
681 70d9e3d8 Iustin Pop
    utils.CreateBackup(pub_key)
682 140aa4a8 Iustin Pop
    return master
683 a8083063 Iustin Pop
684 a8083063 Iustin Pop
685 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
686 a8083063 Iustin Pop
  """Verifies the cluster status.
687 a8083063 Iustin Pop

688 a8083063 Iustin Pop
  """
689 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
690 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
691 e54c4c5e Guido Trotter
  _OP_REQP = ["skip_checks"]
692 d4b9d97f Guido Trotter
  REQ_BGL = False
693 d4b9d97f Guido Trotter
694 d4b9d97f Guido Trotter
  def ExpandNames(self):
695 d4b9d97f Guido Trotter
    self.needed_locks = {
696 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
697 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
698 d4b9d97f Guido Trotter
    }
699 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
700 a8083063 Iustin Pop
701 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
702 6d2e83d5 Iustin Pop
                  node_result, feedback_fn, master_files,
703 cc9e1230 Guido Trotter
                  drbd_map, vg_name):
704 a8083063 Iustin Pop
    """Run multiple tests against a node.
705 a8083063 Iustin Pop

706 112f18a5 Iustin Pop
    Test list:
707 e4376078 Iustin Pop

708 a8083063 Iustin Pop
      - compares ganeti version
709 5bbd3f7f Michael Hanselmann
      - checks vg existence and size > 20G
710 a8083063 Iustin Pop
      - checks config file checksum
711 a8083063 Iustin Pop
      - checks ssh to other nodes
712 a8083063 Iustin Pop

713 112f18a5 Iustin Pop
    @type nodeinfo: L{objects.Node}
714 112f18a5 Iustin Pop
    @param nodeinfo: the node to check
715 e4376078 Iustin Pop
    @param file_list: required list of files
716 e4376078 Iustin Pop
    @param local_cksum: dictionary of local files and their checksums
717 e4376078 Iustin Pop
    @param node_result: the results from the node
718 e4376078 Iustin Pop
    @param feedback_fn: function used to accumulate results
719 112f18a5 Iustin Pop
    @param master_files: list of files that only masters should have
720 6d2e83d5 Iustin Pop
    @param drbd_map: the useddrbd minors for this node, in
721 6d2e83d5 Iustin Pop
        form of minor: (instance, must_exist) which correspond to instances
722 6d2e83d5 Iustin Pop
        and their running status
723 cc9e1230 Guido Trotter
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
724 098c0958 Michael Hanselmann

725 a8083063 Iustin Pop
    """
726 112f18a5 Iustin Pop
    node = nodeinfo.name
727 25361b9a Iustin Pop
728 25361b9a Iustin Pop
    # main result, node_result should be a non-empty dict
729 25361b9a Iustin Pop
    if not node_result or not isinstance(node_result, dict):
730 25361b9a Iustin Pop
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
731 25361b9a Iustin Pop
      return True
732 25361b9a Iustin Pop
733 a8083063 Iustin Pop
    # compares ganeti version
734 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
735 25361b9a Iustin Pop
    remote_version = node_result.get('version', None)
736 e9ce0a64 Iustin Pop
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
737 e9ce0a64 Iustin Pop
            len(remote_version) == 2):
738 c840ae6f Guido Trotter
      feedback_fn("  - ERROR: connection to %s failed" % (node))
739 a8083063 Iustin Pop
      return True
740 a8083063 Iustin Pop
741 e9ce0a64 Iustin Pop
    if local_version != remote_version[0]:
742 e9ce0a64 Iustin Pop
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
743 e9ce0a64 Iustin Pop
                  " node %s %s" % (local_version, node, remote_version[0]))
744 a8083063 Iustin Pop
      return True
745 a8083063 Iustin Pop
746 e9ce0a64 Iustin Pop
    # node seems compatible, we can actually try to look into its results
747 a8083063 Iustin Pop
748 a8083063 Iustin Pop
    bad = False
749 e9ce0a64 Iustin Pop
750 e9ce0a64 Iustin Pop
    # full package version
751 e9ce0a64 Iustin Pop
    if constants.RELEASE_VERSION != remote_version[1]:
752 e9ce0a64 Iustin Pop
      feedback_fn("  - WARNING: software version mismatch: master %s,"
753 e9ce0a64 Iustin Pop
                  " node %s %s" %
754 e9ce0a64 Iustin Pop
                  (constants.RELEASE_VERSION, node, remote_version[1]))
755 e9ce0a64 Iustin Pop
756 e9ce0a64 Iustin Pop
    # checks vg existence and size > 20G
757 cc9e1230 Guido Trotter
    if vg_name is not None:
758 cc9e1230 Guido Trotter
      vglist = node_result.get(constants.NV_VGLIST, None)
759 cc9e1230 Guido Trotter
      if not vglist:
760 cc9e1230 Guido Trotter
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
761 cc9e1230 Guido Trotter
                        (node,))
762 a8083063 Iustin Pop
        bad = True
763 cc9e1230 Guido Trotter
      else:
764 cc9e1230 Guido Trotter
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
765 cc9e1230 Guido Trotter
                                              constants.MIN_VG_SIZE)
766 cc9e1230 Guido Trotter
        if vgstatus:
767 cc9e1230 Guido Trotter
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
768 cc9e1230 Guido Trotter
          bad = True
769 a8083063 Iustin Pop
770 a8083063 Iustin Pop
    # checks config file checksum
771 a8083063 Iustin Pop
772 25361b9a Iustin Pop
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
773 25361b9a Iustin Pop
    if not isinstance(remote_cksum, dict):
774 a8083063 Iustin Pop
      bad = True
775 a8083063 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
776 a8083063 Iustin Pop
    else:
777 a8083063 Iustin Pop
      for file_name in file_list:
778 112f18a5 Iustin Pop
        node_is_mc = nodeinfo.master_candidate
779 112f18a5 Iustin Pop
        must_have_file = file_name not in master_files
780 a8083063 Iustin Pop
        if file_name not in remote_cksum:
781 112f18a5 Iustin Pop
          if node_is_mc or must_have_file:
782 112f18a5 Iustin Pop
            bad = True
783 112f18a5 Iustin Pop
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
784 a8083063 Iustin Pop
        elif remote_cksum[file_name] != local_cksum[file_name]:
785 112f18a5 Iustin Pop
          if node_is_mc or must_have_file:
786 112f18a5 Iustin Pop
            bad = True
787 112f18a5 Iustin Pop
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
788 112f18a5 Iustin Pop
          else:
789 112f18a5 Iustin Pop
            # not candidate and this is not a must-have file
790 112f18a5 Iustin Pop
            bad = True
791 e631cb25 Iustin Pop
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
792 e631cb25 Iustin Pop
                        " candidates (and the file is outdated)" % file_name)
793 112f18a5 Iustin Pop
        else:
794 112f18a5 Iustin Pop
          # all good, except non-master/non-must have combination
795 112f18a5 Iustin Pop
          if not node_is_mc and not must_have_file:
796 112f18a5 Iustin Pop
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
797 112f18a5 Iustin Pop
                        " candidates" % file_name)
798 a8083063 Iustin Pop
799 25361b9a Iustin Pop
    # checks ssh to any
800 25361b9a Iustin Pop
801 25361b9a Iustin Pop
    if constants.NV_NODELIST not in node_result:
802 a8083063 Iustin Pop
      bad = True
803 9d4bfc96 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
804 a8083063 Iustin Pop
    else:
805 25361b9a Iustin Pop
      if node_result[constants.NV_NODELIST]:
806 a8083063 Iustin Pop
        bad = True
807 25361b9a Iustin Pop
        for node in node_result[constants.NV_NODELIST]:
808 9d4bfc96 Iustin Pop
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
809 25361b9a Iustin Pop
                          (node, node_result[constants.NV_NODELIST][node]))
810 25361b9a Iustin Pop
811 25361b9a Iustin Pop
    if constants.NV_NODENETTEST not in node_result:
812 9d4bfc96 Iustin Pop
      bad = True
813 9d4bfc96 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
814 9d4bfc96 Iustin Pop
    else:
815 25361b9a Iustin Pop
      if node_result[constants.NV_NODENETTEST]:
816 9d4bfc96 Iustin Pop
        bad = True
817 25361b9a Iustin Pop
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
818 9d4bfc96 Iustin Pop
        for node in nlist:
819 9d4bfc96 Iustin Pop
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
820 25361b9a Iustin Pop
                          (node, node_result[constants.NV_NODENETTEST][node]))
821 9d4bfc96 Iustin Pop
822 25361b9a Iustin Pop
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
823 e69d05fd Iustin Pop
    if isinstance(hyp_result, dict):
824 e69d05fd Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
825 e69d05fd Iustin Pop
        if hv_result is not None:
826 e69d05fd Iustin Pop
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
827 e69d05fd Iustin Pop
                      (hv_name, hv_result))
828 6d2e83d5 Iustin Pop
829 6d2e83d5 Iustin Pop
    # check used drbd list
830 cc9e1230 Guido Trotter
    if vg_name is not None:
831 cc9e1230 Guido Trotter
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
832 cc9e1230 Guido Trotter
      if not isinstance(used_minors, (tuple, list)):
833 cc9e1230 Guido Trotter
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
834 cc9e1230 Guido Trotter
                    str(used_minors))
835 cc9e1230 Guido Trotter
      else:
836 cc9e1230 Guido Trotter
        for minor, (iname, must_exist) in drbd_map.items():
837 cc9e1230 Guido Trotter
          if minor not in used_minors and must_exist:
838 35e994e9 Iustin Pop
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
839 35e994e9 Iustin Pop
                        " not active" % (minor, iname))
840 cc9e1230 Guido Trotter
            bad = True
841 cc9e1230 Guido Trotter
        for minor in used_minors:
842 cc9e1230 Guido Trotter
          if minor not in drbd_map:
843 35e994e9 Iustin Pop
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
844 35e994e9 Iustin Pop
                        minor)
845 cc9e1230 Guido Trotter
            bad = True
846 6d2e83d5 Iustin Pop
847 a8083063 Iustin Pop
    return bad
848 a8083063 Iustin Pop
849 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
850 0a66c968 Iustin Pop
                      node_instance, feedback_fn, n_offline):
851 a8083063 Iustin Pop
    """Verify an instance.
852 a8083063 Iustin Pop

853 a8083063 Iustin Pop
    This function checks to see if the required block devices are
854 a8083063 Iustin Pop
    available on the instance's node.
855 a8083063 Iustin Pop

856 a8083063 Iustin Pop
    """
857 a8083063 Iustin Pop
    bad = False
858 a8083063 Iustin Pop
859 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
860 a8083063 Iustin Pop
861 a8083063 Iustin Pop
    node_vol_should = {}
862 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
863 a8083063 Iustin Pop
864 a8083063 Iustin Pop
    for node in node_vol_should:
865 0a66c968 Iustin Pop
      if node in n_offline:
866 0a66c968 Iustin Pop
        # ignore missing volumes on offline nodes
867 0a66c968 Iustin Pop
        continue
868 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
869 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
870 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
871 a8083063 Iustin Pop
                          (volume, node))
872 a8083063 Iustin Pop
          bad = True
873 a8083063 Iustin Pop
874 0d68c45d Iustin Pop
    if instanceconfig.admin_up:
875 0a66c968 Iustin Pop
      if ((node_current not in node_instance or
876 0a66c968 Iustin Pop
          not instance in node_instance[node_current]) and
877 0a66c968 Iustin Pop
          node_current not in n_offline):
878 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
879 a8083063 Iustin Pop
                        (instance, node_current))
880 a8083063 Iustin Pop
        bad = True
881 a8083063 Iustin Pop
882 a8083063 Iustin Pop
    for node in node_instance:
883 a8083063 Iustin Pop
      if (not node == node_current):
884 a8083063 Iustin Pop
        if instance in node_instance[node]:
885 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
886 a8083063 Iustin Pop
                          (instance, node))
887 a8083063 Iustin Pop
          bad = True
888 a8083063 Iustin Pop
889 6a438c98 Michael Hanselmann
    return bad
890 a8083063 Iustin Pop
891 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
892 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
893 a8083063 Iustin Pop

894 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
895 a8083063 Iustin Pop
    reported as unknown.
896 a8083063 Iustin Pop

897 a8083063 Iustin Pop
    """
898 a8083063 Iustin Pop
    bad = False
899 a8083063 Iustin Pop
900 a8083063 Iustin Pop
    for node in node_vol_is:
901 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
902 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
903 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
904 a8083063 Iustin Pop
                      (volume, node))
905 a8083063 Iustin Pop
          bad = True
906 a8083063 Iustin Pop
    return bad
907 a8083063 Iustin Pop
908 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
909 a8083063 Iustin Pop
    """Verify the list of running instances.
910 a8083063 Iustin Pop

911 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
912 a8083063 Iustin Pop

913 a8083063 Iustin Pop
    """
914 a8083063 Iustin Pop
    bad = False
915 a8083063 Iustin Pop
    for node in node_instance:
916 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
917 a8083063 Iustin Pop
        if runninginstance not in instancelist:
918 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
919 a8083063 Iustin Pop
                          (runninginstance, node))
920 a8083063 Iustin Pop
          bad = True
921 a8083063 Iustin Pop
    return bad
922 a8083063 Iustin Pop
923 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
924 2b3b6ddd Guido Trotter
    """Verify N+1 Memory Resilience.
925 2b3b6ddd Guido Trotter

926 2b3b6ddd Guido Trotter
    Check that if one single node dies we can still start all the instances it
927 2b3b6ddd Guido Trotter
    was primary for.
928 2b3b6ddd Guido Trotter

929 2b3b6ddd Guido Trotter
    """
930 2b3b6ddd Guido Trotter
    bad = False
931 2b3b6ddd Guido Trotter
932 2b3b6ddd Guido Trotter
    for node, nodeinfo in node_info.iteritems():
933 2b3b6ddd Guido Trotter
      # This code checks that every node which is now listed as secondary has
934 2b3b6ddd Guido Trotter
      # enough memory to host all instances it is supposed to should a single
935 2b3b6ddd Guido Trotter
      # other node in the cluster fail.
936 2b3b6ddd Guido Trotter
      # FIXME: not ready for failover to an arbitrary node
937 2b3b6ddd Guido Trotter
      # FIXME: does not support file-backed instances
938 2b3b6ddd Guido Trotter
      # WARNING: we currently take into account down instances as well as up
939 2b3b6ddd Guido Trotter
      # ones, considering that even if they're down someone might want to start
940 2b3b6ddd Guido Trotter
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
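    # Only check names listed in constants.VERIFY_OPTIONAL_CHECKS may be
    # skipped; e.g. the N+1 memory check is looked up against self.skip_set
    # in Exec below.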

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run only in the post phase; if they fail, their
    output is logged in the verify output and the verification fails.

    """
    all_nodes = self.cfg.GetNodeList()
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
      }
    for node in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())

    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
                        for iname in instancelist)
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    n_offline = [] # List of offline nodes
    n_drained = [] # List of nodes being drained
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    master_files = [constants.CLUSTER_CONF_FILE]

    file_names = ssconf.SimpleStore().GetFileList()
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.RAPI_CERT_FILE)
    file_names.extend(master_files)

    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    node_verify_param = {
      constants.NV_FILELIST: file_names,
      constants.NV_NODELIST: [node.name for node in nodeinfo
                              if not node.offline],
      constants.NV_HYPERVISOR: hypervisors,
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
                                  node.secondary_ip) for node in nodeinfo
                                 if not node.offline],
      constants.NV_INSTANCELIST: hypervisors,
      constants.NV_VERSION: None,
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
      }
    if vg_name is not None:
      node_verify_param[constants.NV_VGLIST] = None
      node_verify_param[constants.NV_LVLIST] = vg_name
      node_verify_param[constants.NV_DRBDLIST] = None
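    # node_verify_param maps NV_* check names to their arguments; each node
    # runs the requested checks locally and returns a dict keyed by the same
    # NV_* constants, which is consumed below via nresult.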
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())

    cluster = self.cfg.GetClusterInfo()
    master_node = self.cfg.GetMasterNode()
    all_drbd_map = self.cfg.ComputeDRBDMap()

    for node_i in nodeinfo:
      node = node_i.name

      if node_i.offline:
        feedback_fn("* Skipping offline node %s" % (node,))
        n_offline.append(node)
        continue

      if node == master_node:
        ntype = "master"
      elif node_i.master_candidate:
        ntype = "master candidate"
      elif node_i.drained:
        ntype = "drained"
        n_drained.append(node)
      else:
        ntype = "regular"
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))

      msg = all_nvinfo[node].fail_msg
      if msg:
        feedback_fn("  - ERROR: while contacting node %s: %s" % (node, msg))
        bad = True
        continue

      nresult = all_nvinfo[node].payload
      node_drbd = {}
      for minor, instance in all_drbd_map[node].items():
        if instance not in instanceinfo:
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
                      instance)
          # ghost instance should not be running, but otherwise we
          # don't give double warnings (both ghost instance and
          # unallocated minor in use)
          node_drbd[minor] = (instance, False)
        else:
          instance = instanceinfo[instance]
          node_drbd[minor] = (instance.name, instance.admin_up)
      result = self._VerifyNode(node_i, file_names, local_checksums,
                                nresult, feedback_fn, master_files,
                                node_drbd, vg_name)
      bad = bad or result

      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
      if vg_name is None:
        node_volume[node] = {}
      elif isinstance(lvdata, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, utils.SafeEncode(lvdata)))
        bad = True
        node_volume[node] = {}
      elif not isinstance(lvdata, dict):
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = lvdata

      # node_instance
      idata = nresult.get(constants.NV_INSTANCELIST, None)
      if not isinstance(idata, list):
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
                    (node,))
        bad = True
        continue

      node_instance[node] = idata

      # node_info
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary. This is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
        # FIXME: devise a free space model for file based instances as well
        if vg_name is not None:
          if (constants.NV_VGLIST not in nresult or
              vg_name not in nresult[constants.NV_VGLIST]):
            feedback_fn("  - ERROR: node %s didn't return data for the"
                        " volume group '%s' - it is either missing or broken" %
                        (node, vg_name))
            bad = True
            continue
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
      except (ValueError, KeyError):
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
                    " from node %s" % (node,))
        bad = True
        continue

1145 a8083063 Iustin Pop
1146 a8083063 Iustin Pop
    for instance in instancelist:
1147 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1148 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1149 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1150 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1151 c5705f58 Guido Trotter
      bad = bad or result
1152 832261fd Iustin Pop
      inst_nodes_offline = []
1153 a8083063 Iustin Pop
1154 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1155 a8083063 Iustin Pop
1156 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1157 26b6af5e Guido Trotter
1158 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1159 93e4c50b Guido Trotter
      if pnode in node_info:
1160 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1161 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1162 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1163 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1164 93e4c50b Guido Trotter
        bad = True
1165 93e4c50b Guido Trotter
1166 832261fd Iustin Pop
      if pnode in n_offline:
1167 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1168 832261fd Iustin Pop
1169 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1170 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1171 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1172 93e4c50b Guido Trotter
      # supported either.
1173 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1174 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1175 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1176 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1177 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1178 93e4c50b Guido Trotter
                    % instance)
1179 93e4c50b Guido Trotter
1180 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1181 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1182 3924700f Iustin Pop
1183 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1184 93e4c50b Guido Trotter
        if snode in node_info:
1185 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1186 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1187 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1188 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1189 0a66c968 Iustin Pop
        elif snode not in n_offline:
1190 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1191 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1192 832261fd Iustin Pop
          bad = True
1193 832261fd Iustin Pop
        if snode in n_offline:
1194 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1195 832261fd Iustin Pop
1196 832261fd Iustin Pop
      if inst_nodes_offline:
1197 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1198 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1199 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1200 832261fd Iustin Pop
        bad = True
1201 93e4c50b Guido Trotter
1202 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1203 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1204 a8083063 Iustin Pop
                                       feedback_fn)
1205 a8083063 Iustin Pop
    bad = bad or result
1206 a8083063 Iustin Pop
1207 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1208 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1209 a8083063 Iustin Pop
                                         feedback_fn)
1210 a8083063 Iustin Pop
    bad = bad or result
1211 a8083063 Iustin Pop
1212 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1213 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1214 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1215 e54c4c5e Guido Trotter
      bad = bad or result
1216 2b3b6ddd Guido Trotter
1217 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1218 2b3b6ddd Guido Trotter
    if i_non_redundant:
1219 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1220 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1221 2b3b6ddd Guido Trotter
1222 3924700f Iustin Pop
    if i_non_a_balanced:
1223 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1224 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1225 3924700f Iustin Pop
1226 0a66c968 Iustin Pop
    if n_offline:
1227 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1228 0a66c968 Iustin Pop
1229 22f0f71d Iustin Pop
    if n_drained:
1230 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1231 22f0f71d Iustin Pop
1232 34290825 Michael Hanselmann
    return not bad
1233 a8083063 Iustin Pop
1234 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result.

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          msg = res.fail_msg
          if msg:
            if res.offline:
              # no need to warn or set fail return value
              continue
            feedback_fn("    Communication failure in hooks execution: %s" %
                        msg)
            lu_result = 1
            continue
          for script, hkr, output in res.payload:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    result = res_nodes, res_instances, res_missing = {}, [], {}
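    # res_nodes, res_instances and res_missing alias the members of the
    # result tuple, so filling them in below also fills in the returned
    # value; an illustrative (made-up) final result:
    #   ({'node3': 'connection failed'}, ['instance1'],
    #    {'instance2': [('node1', 'xenvg/disk0')]})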

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_lv_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      node_res = node_lvs[node]
      if node_res.offline:
        continue
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        res_nodes[node] = msg
        continue

      lvs = node_res.payload
      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result

class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        msg = to_result.fail_msg
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
          self.proc.LogWarning(msg)

    finally:
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether an LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV
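  # Note (illustrative): a mirrored disk whose children are logical volumes
  # is reported as LVM-based because the recursion descends into
  # disk.children; LUSetClusterParams.CheckPrereq below relies on this when
  # refusing to disable LVM storage.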


class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except (ValueError, TypeError), err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = objects.FillDict(
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
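      # FillDict starts from the current cluster-wide defaults and overlays
      # the values supplied in the opcode; e.g. (made-up values) defaults
      # {'memory': 128, 'auto_balance': True} merged with {'memory': 512}
      # give {'memory': 512, 'auto_balance': True}.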

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = objects.FillDict(
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)

    # hypervisor list/parameters
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      if not self.hv_list:
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                                   " least one member")
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
      if invalid_hvs:
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                                   " entries: %s" % invalid_hvs)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self)

    self.cfg.Update(self.cluster)

def _RedistributeAncillaryFiles(lu, additional_nodes=None):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to

  """
  # 1. Gather target nodes
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  dist_nodes = lu.cfg.GetNodeList()
  if additional_nodes is not None:
    dist_nodes.extend(additional_nodes)
  if myself.name in dist_nodes:
    dist_nodes.remove(myself.name)
  # 2. Gather files to distribute
  dist_files = set([constants.ETC_HOSTS,
                    constants.SSH_KNOWN_HOSTS_FILE,
                    constants.RAPI_CERT_FILE,
                    constants.RAPI_USERS_FILE,
                    constants.HMAC_CLUSTER_KEY,
                   ])

  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
  for hv_name in enabled_hypervisors:
    hv_class = hypervisor.GetHypervisor(hv_name)
    dist_files.update(hv_class.GetAncillaryFiles())
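  # Only files that actually exist on the master are pushed (see the
  # os.path.exists test below); per-node copy failures are logged as
  # warnings and do not abort the operation.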

  # 3. Perform the files upload
  for fname in dist_files:
    if os.path.exists(fname):
      result = lu.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.items():
        msg = to_result.fail_msg
        if msg:
          msg = ("Copy of file %s to node %s failed: %s" %
                 (fname, to_node, msg))
          lu.proc.LogWarning(msg)


class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo())
    _RedistributeAncillaryFiles(self)


def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
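      # Illustrative example (made-up values): mstat == (16.20, 840, True,
      # False) would mean the mirror is 16.2% done, with ~840 seconds
      # estimated remaining and still degraded; the fourth field (the ldisk
      # status) is ignored here.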
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
  if ldisk:
    idx = 6
  else:
    idx = 5
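  # indices 5 and 6 select the is_degraded and ldisk fields, respectively,
  # of the status tuple returned by call_blockdev_find (used below as
  # rstats.payload[idx]); which one is tested depends on the ldisk argument.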

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      result = result and (not rstats.payload[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and tuples of (path, status, diagnose) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "")]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for name, path, status, diagnose in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        all_os[name][node_name].append((path, status, diagnose))
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.items():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0][1] for osl in os_data.values()])
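          # an OS counts as valid only if every node reported it and the
          # first (path, status, diagnose) entry on each node has a True
          # status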
1876 1f9430d6 Iustin Pop
        elif field == "node_status":
1877 255dcebd Iustin Pop
          # this is just a copy of the dict
1878 1f9430d6 Iustin Pop
          val = {}
1879 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
1880 255dcebd Iustin Pop
            val[node_name] = nos_list
1881 1f9430d6 Iustin Pop
        else:
1882 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1883 1f9430d6 Iustin Pop
        row.append(val)
1884 1f9430d6 Iustin Pop
      output.append(row)
1885 1f9430d6 Iustin Pop
1886 1f9430d6 Iustin Pop
    return output
1887 a8083063 Iustin Pop
1888 a8083063 Iustin Pop
1889 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1890 a8083063 Iustin Pop
  """Logical unit for removing a node.
1891 a8083063 Iustin Pop

1892 a8083063 Iustin Pop
  """
1893 a8083063 Iustin Pop
  HPATH = "node-remove"
1894 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1895 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1896 a8083063 Iustin Pop
1897 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1898 a8083063 Iustin Pop
    """Build hooks env.
1899 a8083063 Iustin Pop

1900 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1901 d08869ee Guido Trotter
    node would then be impossible to remove.
1902 a8083063 Iustin Pop

1903 a8083063 Iustin Pop
    """
1904 396e1b78 Michael Hanselmann
    env = {
1905 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1906 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1907 396e1b78 Michael Hanselmann
      }
1908 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1909 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1910 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1911 a8083063 Iustin Pop
1912 a8083063 Iustin Pop
  def CheckPrereq(self):
1913 a8083063 Iustin Pop
    """Check prerequisites.
1914 a8083063 Iustin Pop

1915 a8083063 Iustin Pop
    This checks:
1916 a8083063 Iustin Pop
     - the node exists in the configuration
1917 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1918 a8083063 Iustin Pop
     - it's not the master
1919 a8083063 Iustin Pop

1920 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
1921 a8083063 Iustin Pop

1922 a8083063 Iustin Pop
    """
1923 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1924 a8083063 Iustin Pop
    if node is None:
1925 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1926 a8083063 Iustin Pop
1927 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1928 a8083063 Iustin Pop
1929 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
1930 a8083063 Iustin Pop
    if node.name == masternode:
1931 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1932 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1933 a8083063 Iustin Pop
1934 a8083063 Iustin Pop
    for instance_name in instance_list:
1935 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1936 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
1937 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
1938 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1939 a8083063 Iustin Pop
    self.op.node_name = node.name
1940 a8083063 Iustin Pop
    self.node = node
1941 a8083063 Iustin Pop
1942 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1943 a8083063 Iustin Pop
    """Removes the node from the cluster.
1944 a8083063 Iustin Pop

1945 a8083063 Iustin Pop
    """
1946 a8083063 Iustin Pop
    node = self.node
1947 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
1948 9a4f63d1 Iustin Pop
                 node.name)
1949 a8083063 Iustin Pop
1950 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
1951 a8083063 Iustin Pop
1952 0623d351 Iustin Pop
    result = self.rpc.call_node_leave_cluster(node.name)
1953 4c4e4e1e Iustin Pop
    msg = result.fail_msg
1954 0623d351 Iustin Pop
    if msg:
1955 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
1956 0623d351 Iustin Pop
                      " the cluster: %s", msg)
1957 c8a0948f Michael Hanselmann
1958 eb1742d5 Guido Trotter
    # Promote nodes to master candidate as needed
1959 ec0292f1 Iustin Pop
    _AdjustCandidatePool(self)
1960 eb1742d5 Guido Trotter
1961 a8083063 Iustin Pop
1962 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1963 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1964 a8083063 Iustin Pop

1965 a8083063 Iustin Pop
  """
1966 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
1967 35705d8f Guido Trotter
  REQ_BGL = False
1968 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
1969 31bf511f Iustin Pop
    "dtotal", "dfree",
1970 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
1971 31bf511f Iustin Pop
    "bootid",
1972 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
1973 31bf511f Iustin Pop
    )
1974 31bf511f Iustin Pop
1975 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(
1976 31bf511f Iustin Pop
    "name", "pinst_cnt", "sinst_cnt",
1977 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
1978 31bf511f Iustin Pop
    "pip", "sip", "tags",
1979 31bf511f Iustin Pop
    "serial_no",
1980 0e67cdbe Iustin Pop
    "master_candidate",
1981 0e67cdbe Iustin Pop
    "master",
1982 9ddb5e45 Iustin Pop
    "offline",
1983 0b2454b9 Iustin Pop
    "drained",
1984 c120ff34 Iustin Pop
    "role",
1985 31bf511f Iustin Pop
    )
1986 a8083063 Iustin Pop
1987 35705d8f Guido Trotter
  def ExpandNames(self):
1988 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1989 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1990 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1991 a8083063 Iustin Pop
1992 35705d8f Guido Trotter
    self.needed_locks = {}
1993 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1994 c8d8b4c8 Iustin Pop
1995 c8d8b4c8 Iustin Pop
    if self.op.names:
1996 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
1997 35705d8f Guido Trotter
    else:
1998 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
1999 c8d8b4c8 Iustin Pop
2000 bc8e4a1a Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
2001 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
2002 c8d8b4c8 Iustin Pop
    if self.do_locking:
2003 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
2004 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
2005 c8d8b4c8 Iustin Pop
2006 35705d8f Guido Trotter
2007 35705d8f Guido Trotter
  def CheckPrereq(self):
2008 35705d8f Guido Trotter
    """Check prerequisites.
2009 35705d8f Guido Trotter

2010 35705d8f Guido Trotter
    """
2011 c8d8b4c8 Iustin Pop
    # The node list is validated in _GetWantedNodes if it is not empty;
2012 c8d8b4c8 Iustin Pop
    # if it is empty, there is nothing to validate
2013 c8d8b4c8 Iustin Pop
    pass
2014 a8083063 Iustin Pop
2015 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2016 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2017 a8083063 Iustin Pop

2018 a8083063 Iustin Pop
    """
2019 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
2020 c8d8b4c8 Iustin Pop
    if self.do_locking:
2021 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
2022 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
2023 3fa93523 Guido Trotter
      nodenames = self.wanted
2024 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
2025 3fa93523 Guido Trotter
      if missing:
2026 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
2027 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
2028 c8d8b4c8 Iustin Pop
    else:
2029 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
2030 c1f1cbb2 Iustin Pop
2031 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
2032 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
2033 a8083063 Iustin Pop
2034 a8083063 Iustin Pop
    # begin data gathering
2035 a8083063 Iustin Pop
2036 bc8e4a1a Iustin Pop
    if self.do_node_query:
2037 a8083063 Iustin Pop
      live_data = {}
2038 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2039 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2040 a8083063 Iustin Pop
      for name in nodenames:
2041 781de953 Iustin Pop
        nodeinfo = node_data[name]
2042 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2043 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2044 d599d686 Iustin Pop
          fn = utils.TryConvert
2045 a8083063 Iustin Pop
          live_data[name] = {
2046 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2047 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2048 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2049 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2050 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2051 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2052 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2053 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2054 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2055 a8083063 Iustin Pop
            }
2056 a8083063 Iustin Pop
        else:
2057 a8083063 Iustin Pop
          live_data[name] = {}
2058 a8083063 Iustin Pop
    else:
2059 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
2060 a8083063 Iustin Pop
2061 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
2062 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2063 a8083063 Iustin Pop
2064 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2065 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2066 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
2067 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
2068 a8083063 Iustin Pop
2069 ec223efb Iustin Pop
      for instance_name in instancelist:
2070 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
2071 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
2072 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
2073 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
2074 ec223efb Iustin Pop
          if secnode in node_to_secondary:
2075 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
2076 a8083063 Iustin Pop
2077 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
2078 0e67cdbe Iustin Pop
2079 a8083063 Iustin Pop
    # end data gathering
2080 a8083063 Iustin Pop
2081 a8083063 Iustin Pop
    output = []
2082 a8083063 Iustin Pop
    for node in nodelist:
2083 a8083063 Iustin Pop
      node_output = []
2084 a8083063 Iustin Pop
      for field in self.op.output_fields:
2085 a8083063 Iustin Pop
        if field == "name":
2086 a8083063 Iustin Pop
          val = node.name
2087 ec223efb Iustin Pop
        elif field == "pinst_list":
2088 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
2089 ec223efb Iustin Pop
        elif field == "sinst_list":
2090 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
2091 ec223efb Iustin Pop
        elif field == "pinst_cnt":
2092 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
2093 ec223efb Iustin Pop
        elif field == "sinst_cnt":
2094 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
2095 a8083063 Iustin Pop
        elif field == "pip":
2096 a8083063 Iustin Pop
          val = node.primary_ip
2097 a8083063 Iustin Pop
        elif field == "sip":
2098 a8083063 Iustin Pop
          val = node.secondary_ip
2099 130a6a6f Iustin Pop
        elif field == "tags":
2100 130a6a6f Iustin Pop
          val = list(node.GetTags())
2101 38d7239a Iustin Pop
        elif field == "serial_no":
2102 38d7239a Iustin Pop
          val = node.serial_no
2103 0e67cdbe Iustin Pop
        elif field == "master_candidate":
2104 0e67cdbe Iustin Pop
          val = node.master_candidate
2105 0e67cdbe Iustin Pop
        elif field == "master":
2106 0e67cdbe Iustin Pop
          val = node.name == master_node
2107 9ddb5e45 Iustin Pop
        elif field == "offline":
2108 9ddb5e45 Iustin Pop
          val = node.offline
2109 0b2454b9 Iustin Pop
        elif field == "drained":
2110 0b2454b9 Iustin Pop
          val = node.drained
2111 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
2112 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
2113 c120ff34 Iustin Pop
        elif field == "role":
2114 c120ff34 Iustin Pop
          if node.name == master_node:
2115 c120ff34 Iustin Pop
            val = "M"
2116 c120ff34 Iustin Pop
          elif node.master_candidate:
2117 c120ff34 Iustin Pop
            val = "C"
2118 c120ff34 Iustin Pop
          elif node.drained:
2119 c120ff34 Iustin Pop
            val = "D"
2120 c120ff34 Iustin Pop
          elif node.offline:
2121 c120ff34 Iustin Pop
            val = "O"
2122 c120ff34 Iustin Pop
          else:
2123 c120ff34 Iustin Pop
            val = "R"
2124 a8083063 Iustin Pop
        else:
2125 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2126 a8083063 Iustin Pop
        node_output.append(val)
2127 a8083063 Iustin Pop
      output.append(node_output)
2128 a8083063 Iustin Pop
2129 a8083063 Iustin Pop
    return output
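
  # Illustrative legend (not used by the code): the single-letter "role"
  # values computed above stand for M = master, C = master candidate,
  # D = drained, O = offline and R = none of the above (a regular node),
  # e.g. a hypothetical reverse mapping would be:
  #   {"M": "master", "C": "candidate", "D": "drained",
  #    "O": "offline", "R": "regular"}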
2130 a8083063 Iustin Pop
2131 a8083063 Iustin Pop
2132 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
2133 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
2134 dcb93971 Michael Hanselmann

2135 dcb93971 Michael Hanselmann
  """
2136 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
2137 21a15682 Guido Trotter
  REQ_BGL = False
2138 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2139 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
2140 21a15682 Guido Trotter
2141 21a15682 Guido Trotter
  def ExpandNames(self):
2142 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2143 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2144 21a15682 Guido Trotter
                       selected=self.op.output_fields)
2145 21a15682 Guido Trotter
2146 21a15682 Guido Trotter
    self.needed_locks = {}
2147 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2148 21a15682 Guido Trotter
    if not self.op.nodes:
2149 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2150 21a15682 Guido Trotter
    else:
2151 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
2152 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
2153 dcb93971 Michael Hanselmann
2154 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
2155 dcb93971 Michael Hanselmann
    """Check prerequisites.
2156 dcb93971 Michael Hanselmann

2157 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
2158 dcb93971 Michael Hanselmann

2159 dcb93971 Michael Hanselmann
    """
2160 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2161 dcb93971 Michael Hanselmann
2162 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
2163 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
2164 dcb93971 Michael Hanselmann

2165 dcb93971 Michael Hanselmann
    """
2166 a7ba5e53 Iustin Pop
    nodenames = self.nodes
2167 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
2168 dcb93971 Michael Hanselmann
2169 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
2170 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
2171 dcb93971 Michael Hanselmann
2172 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
2173 dcb93971 Michael Hanselmann
2174 dcb93971 Michael Hanselmann
    output = []
2175 dcb93971 Michael Hanselmann
    for node in nodenames:
2176 10bfe6cb Iustin Pop
      nresult = volumes[node]
2177 10bfe6cb Iustin Pop
      if nresult.offline:
2178 10bfe6cb Iustin Pop
        continue
2179 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
2180 10bfe6cb Iustin Pop
      if msg:
2181 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
2182 37d19eb2 Michael Hanselmann
        continue
2183 37d19eb2 Michael Hanselmann
2184 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
2185 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
2186 dcb93971 Michael Hanselmann
2187 dcb93971 Michael Hanselmann
      for vol in node_vols:
2188 dcb93971 Michael Hanselmann
        node_output = []
2189 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
2190 dcb93971 Michael Hanselmann
          if field == "node":
2191 dcb93971 Michael Hanselmann
            val = node
2192 dcb93971 Michael Hanselmann
          elif field == "phys":
2193 dcb93971 Michael Hanselmann
            val = vol['dev']
2194 dcb93971 Michael Hanselmann
          elif field == "vg":
2195 dcb93971 Michael Hanselmann
            val = vol['vg']
2196 dcb93971 Michael Hanselmann
          elif field == "name":
2197 dcb93971 Michael Hanselmann
            val = vol['name']
2198 dcb93971 Michael Hanselmann
          elif field == "size":
2199 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
2200 dcb93971 Michael Hanselmann
          elif field == "instance":
2201 dcb93971 Michael Hanselmann
            for inst in ilist:
2202 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
2203 dcb93971 Michael Hanselmann
                continue
2204 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
2205 dcb93971 Michael Hanselmann
                val = inst.name
2206 dcb93971 Michael Hanselmann
                break
2207 dcb93971 Michael Hanselmann
            else:
2208 dcb93971 Michael Hanselmann
              val = '-'
2209 dcb93971 Michael Hanselmann
          else:
2210 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
2211 dcb93971 Michael Hanselmann
          node_output.append(str(val))
2212 dcb93971 Michael Hanselmann
2213 dcb93971 Michael Hanselmann
        output.append(node_output)
2214 dcb93971 Michael Hanselmann
2215 dcb93971 Michael Hanselmann
    return output
2216 dcb93971 Michael Hanselmann
2217 dcb93971 Michael Hanselmann
2218 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
2219 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
2220 a8083063 Iustin Pop

2221 a8083063 Iustin Pop
  """
2222 a8083063 Iustin Pop
  HPATH = "node-add"
2223 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2224 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2225 a8083063 Iustin Pop
2226 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2227 a8083063 Iustin Pop
    """Build hooks env.
2228 a8083063 Iustin Pop

2229 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
2230 a8083063 Iustin Pop

2231 a8083063 Iustin Pop
    """
2232 a8083063 Iustin Pop
    env = {
2233 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2234 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
2235 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
2236 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
2237 a8083063 Iustin Pop
      }
2238 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
2239 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
2240 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
2241 a8083063 Iustin Pop
2242 a8083063 Iustin Pop
  def CheckPrereq(self):
2243 a8083063 Iustin Pop
    """Check prerequisites.
2244 a8083063 Iustin Pop

2245 a8083063 Iustin Pop
    This checks:
2246 a8083063 Iustin Pop
     - the new node is not already in the config
2247 a8083063 Iustin Pop
     - it is resolvable
2248 a8083063 Iustin Pop
     - its parameters (single/dual homed) match the cluster
2249 a8083063 Iustin Pop

2250 5bbd3f7f Michael Hanselmann
    Any errors are signaled by raising errors.OpPrereqError.
2251 a8083063 Iustin Pop

2252 a8083063 Iustin Pop
    """
2253 a8083063 Iustin Pop
    node_name = self.op.node_name
2254 a8083063 Iustin Pop
    cfg = self.cfg
2255 a8083063 Iustin Pop
2256 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
2257 a8083063 Iustin Pop
2258 bcf043c9 Iustin Pop
    node = dns_data.name
2259 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
2260 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
2261 a8083063 Iustin Pop
    if secondary_ip is None:
2262 a8083063 Iustin Pop
      secondary_ip = primary_ip
2263 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
2264 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
2265 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
2266 e7c6e02b Michael Hanselmann
2267 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
2268 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
2269 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2270 e7c6e02b Michael Hanselmann
                                 node)
2271 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
2272 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2273 a8083063 Iustin Pop
2274 a8083063 Iustin Pop
    for existing_node_name in node_list:
2275 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
2276 e7c6e02b Michael Hanselmann
2277 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
2278 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
2279 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
2280 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2281 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
2282 e7c6e02b Michael Hanselmann
        continue
2283 e7c6e02b Michael Hanselmann
2284 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
2285 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
2286 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
2287 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
2288 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
2289 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
2290 a8083063 Iustin Pop
2291 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
2292 a8083063 Iustin Pop
    # same as for the master
2293 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2294 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
2295 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
2296 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
2297 a8083063 Iustin Pop
      if master_singlehomed:
2298 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
2299 3ecf6786 Iustin Pop
                                   " new node has one")
2300 a8083063 Iustin Pop
      else:
2301 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
2302 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
2303 a8083063 Iustin Pop
2304 5bbd3f7f Michael Hanselmann
    # checks reachability
2305 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2306 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
2307 a8083063 Iustin Pop
2308 a8083063 Iustin Pop
    if not newbie_singlehomed:
2309 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
2310 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2311 b15d625f Iustin Pop
                           source=myself.secondary_ip):
2312 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2313 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
2314 a8083063 Iustin Pop
2315 0fff97e9 Guido Trotter
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2316 a8ae3eb5 Iustin Pop
    if self.op.readd:
2317 a8ae3eb5 Iustin Pop
      exceptions = [node]
2318 a8ae3eb5 Iustin Pop
    else:
2319 a8ae3eb5 Iustin Pop
      exceptions = []
2320 a8ae3eb5 Iustin Pop
    mc_now, mc_max = self.cfg.GetMasterCandidateStats(exceptions)
2321 a8ae3eb5 Iustin Pop
    # the new node will increase mc_max by one, so:
2322 a8ae3eb5 Iustin Pop
    mc_max = min(mc_max + 1, cp_size)
2323 a8ae3eb5 Iustin Pop
    self.master_candidate = mc_now < mc_max
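
    # Worked example with hypothetical numbers: for candidate_pool_size=10
    # and a three-node cluster that already has mc_now=3 candidates and
    # mc_max=3, the new node gives mc_max = min(3 + 1, 10) = 4, so
    # mc_now (3) < mc_max (4) and the node will be added as a master
    # candidate.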
2324 0fff97e9 Guido Trotter
2325 a8ae3eb5 Iustin Pop
    if self.op.readd:
2326 a8ae3eb5 Iustin Pop
      self.new_node = self.cfg.GetNodeInfo(node)
2327 a8ae3eb5 Iustin Pop
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
2328 a8ae3eb5 Iustin Pop
    else:
2329 a8ae3eb5 Iustin Pop
      self.new_node = objects.Node(name=node,
2330 a8ae3eb5 Iustin Pop
                                   primary_ip=primary_ip,
2331 a8ae3eb5 Iustin Pop
                                   secondary_ip=secondary_ip,
2332 a8ae3eb5 Iustin Pop
                                   master_candidate=self.master_candidate,
2333 a8ae3eb5 Iustin Pop
                                   offline=False, drained=False)
2334 a8083063 Iustin Pop
2335 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2336 a8083063 Iustin Pop
    """Adds the new node to the cluster.
2337 a8083063 Iustin Pop

2338 a8083063 Iustin Pop
    """
2339 a8083063 Iustin Pop
    new_node = self.new_node
2340 a8083063 Iustin Pop
    node = new_node.name
2341 a8083063 Iustin Pop
2342 a8ae3eb5 Iustin Pop
    # for re-adds, reset the offline/drained/master-candidate flags;
2343 a8ae3eb5 Iustin Pop
    # we need to reset here, otherwise offline would prevent RPC calls
2344 a8ae3eb5 Iustin Pop
    # later in the procedure; this also means that if the re-add
2345 a8ae3eb5 Iustin Pop
    # fails, we are left with a non-offlined, broken node
2346 a8ae3eb5 Iustin Pop
    if self.op.readd:
2347 a8ae3eb5 Iustin Pop
      new_node.drained = new_node.offline = False
2348 a8ae3eb5 Iustin Pop
      self.LogInfo("Readding a node, the offline/drained flags were reset")
2349 a8ae3eb5 Iustin Pop
      # if we demote the node, we do cleanup later in the procedure
2350 a8ae3eb5 Iustin Pop
      new_node.master_candidate = self.master_candidate
2351 a8ae3eb5 Iustin Pop
2352 a8ae3eb5 Iustin Pop
    # notify the user about any possible mc promotion
2353 a8ae3eb5 Iustin Pop
    if new_node.master_candidate:
2354 a8ae3eb5 Iustin Pop
      self.LogInfo("Node will be a master candidate")
2355 a8ae3eb5 Iustin Pop
2356 a8083063 Iustin Pop
    # check connectivity
2357 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
2358 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
2359 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
2360 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
2361 90b54c26 Iustin Pop
                   node, result.payload)
2362 a8083063 Iustin Pop
    else:
2363 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
2364 90b54c26 Iustin Pop
                               " node version %s" %
2365 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
2366 a8083063 Iustin Pop
2367 a8083063 Iustin Pop
    # setup ssh on node
2368 9a4f63d1 Iustin Pop
    logging.info("Copy ssh key to node %s", node)
2369 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2370 a8083063 Iustin Pop
    keyarray = []
2371 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2372 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2373 70d9e3d8 Iustin Pop
                priv_key, pub_key]
2374 a8083063 Iustin Pop
2375 a8083063 Iustin Pop
    for i in keyfiles:
2376 a8083063 Iustin Pop
      f = open(i, 'r')
2377 a8083063 Iustin Pop
      try:
2378 a8083063 Iustin Pop
        keyarray.append(f.read())
2379 a8083063 Iustin Pop
      finally:
2380 a8083063 Iustin Pop
        f.close()
2381 a8083063 Iustin Pop
2382 72737a7f Iustin Pop
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2383 72737a7f Iustin Pop
                                    keyarray[2],
2384 72737a7f Iustin Pop
                                    keyarray[3], keyarray[4], keyarray[5])
2385 4c4e4e1e Iustin Pop
    result.Raise("Cannot transfer ssh keys to the new node")
2386 a8083063 Iustin Pop
2387 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
2388 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
2389 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
2390 c8a0948f Michael Hanselmann
2391 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
2392 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
2393 781de953 Iustin Pop
                                                 new_node.secondary_ip)
2394 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
2395 4c4e4e1e Iustin Pop
                   prereq=True)
2396 c2fc8250 Iustin Pop
      if not result.payload:
2397 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2398 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
2399 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
2400 a8083063 Iustin Pop
2401 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
2402 5c0527ed Guido Trotter
    node_verify_param = {
2403 5c0527ed Guido Trotter
      'nodelist': [node],
2404 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
2405 5c0527ed Guido Trotter
    }
2406 5c0527ed Guido Trotter
2407 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2408 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
2409 5c0527ed Guido Trotter
    for verifier in node_verify_list:
2410 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
2411 6f68a739 Iustin Pop
      nl_payload = result[verifier].payload['nodelist']
2412 6f68a739 Iustin Pop
      if nl_payload:
2413 6f68a739 Iustin Pop
        for failed in nl_payload:
2414 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
2415 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
2416 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
2417 ff98055b Iustin Pop
2418 d8470559 Michael Hanselmann
    if self.op.readd:
2419 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
2420 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
2421 a8ae3eb5 Iustin Pop
      # make sure we redistribute the config
2422 a8ae3eb5 Iustin Pop
      self.cfg.Update(new_node)
2423 a8ae3eb5 Iustin Pop
      # and make sure the new node will not have old files around
2424 a8ae3eb5 Iustin Pop
      if not new_node.master_candidate:
2425 a8ae3eb5 Iustin Pop
        result = self.rpc.call_node_demote_from_mc(new_node.name)
2426 a8ae3eb5 Iustin Pop
        msg = result.fail_msg
2427 a8ae3eb5 Iustin Pop
        if msg:
2428 a8ae3eb5 Iustin Pop
          self.LogWarning("Node failed to demote itself from master"
2429 a8ae3eb5 Iustin Pop
                          " candidate status: %s" % msg)
2430 d8470559 Michael Hanselmann
    else:
2431 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
2432 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
2433 a8083063 Iustin Pop
2434 a8083063 Iustin Pop
2435 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
2436 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
2437 b31c8676 Iustin Pop

2438 b31c8676 Iustin Pop
  """
2439 b31c8676 Iustin Pop
  HPATH = "node-modify"
2440 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2441 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
2442 b31c8676 Iustin Pop
  REQ_BGL = False
2443 b31c8676 Iustin Pop
2444 b31c8676 Iustin Pop
  def CheckArguments(self):
2445 b31c8676 Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2446 b31c8676 Iustin Pop
    if node_name is None:
2447 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2448 b31c8676 Iustin Pop
    self.op.node_name = node_name
2449 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
2450 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
2451 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
2452 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
2453 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
2454 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification")
2455 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
2456 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
2457 c9d443ea Iustin Pop
                                 " state at the same time")
2458 b31c8676 Iustin Pop
2459 b31c8676 Iustin Pop
  def ExpandNames(self):
2460 b31c8676 Iustin Pop
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2461 b31c8676 Iustin Pop
2462 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
2463 b31c8676 Iustin Pop
    """Build hooks env.
2464 b31c8676 Iustin Pop

2465 b31c8676 Iustin Pop
    This runs on the master node.
2466 b31c8676 Iustin Pop

2467 b31c8676 Iustin Pop
    """
2468 b31c8676 Iustin Pop
    env = {
2469 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
2470 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
2471 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
2472 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
2473 b31c8676 Iustin Pop
      }
2474 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
2475 b31c8676 Iustin Pop
          self.op.node_name]
2476 b31c8676 Iustin Pop
    return env, nl, nl
2477 b31c8676 Iustin Pop
2478 b31c8676 Iustin Pop
  def CheckPrereq(self):
2479 b31c8676 Iustin Pop
    """Check prerequisites.
2480 b31c8676 Iustin Pop

2481 b31c8676 Iustin Pop
    This only checks the instance list against the existing names.
2482 b31c8676 Iustin Pop

2483 b31c8676 Iustin Pop
    """
2484 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
2485 b31c8676 Iustin Pop
2486 c9d443ea Iustin Pop
    if ((self.op.master_candidate == False or self.op.offline == True or
2487 c9d443ea Iustin Pop
         self.op.drained == True) and node.master_candidate):
2488 3a5ba66a Iustin Pop
      # we will demote the node from master_candidate
2489 3a26773f Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
2490 3a26773f Iustin Pop
        raise errors.OpPrereqError("The master node has to be a"
2491 c9d443ea Iustin Pop
                                   " master candidate, online and not drained")
2492 3e83dd48 Iustin Pop
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2493 3a5ba66a Iustin Pop
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
2494 3e83dd48 Iustin Pop
      if num_candidates <= cp_size:
2495 3e83dd48 Iustin Pop
        msg = ("Not enough master candidates (desired"
2496 3e83dd48 Iustin Pop
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
2497 3a5ba66a Iustin Pop
        if self.op.force:
2498 3e83dd48 Iustin Pop
          self.LogWarning(msg)
2499 3e83dd48 Iustin Pop
        else:
2500 3e83dd48 Iustin Pop
          raise errors.OpPrereqError(msg)
2501 3e83dd48 Iustin Pop
2502 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
2503 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
2504 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
2505 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
2506 949bdabe Iustin Pop
                                 " to master_candidate" % node.name)
2507 3a5ba66a Iustin Pop
2508 b31c8676 Iustin Pop
    return
2509 b31c8676 Iustin Pop
2510 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
2511 b31c8676 Iustin Pop
    """Modifies a node.
2512 b31c8676 Iustin Pop

2513 b31c8676 Iustin Pop
    """
2514 3a5ba66a Iustin Pop
    node = self.node
2515 b31c8676 Iustin Pop
2516 b31c8676 Iustin Pop
    result = []
2517 c9d443ea Iustin Pop
    changed_mc = False
2518 b31c8676 Iustin Pop
2519 3a5ba66a Iustin Pop
    if self.op.offline is not None:
2520 3a5ba66a Iustin Pop
      node.offline = self.op.offline
2521 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
2522 c9d443ea Iustin Pop
      if self.op.offline == True:
2523 c9d443ea Iustin Pop
        if node.master_candidate:
2524 c9d443ea Iustin Pop
          node.master_candidate = False
2525 c9d443ea Iustin Pop
          changed_mc = True
2526 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
2527 c9d443ea Iustin Pop
        if node.drained:
2528 c9d443ea Iustin Pop
          node.drained = False
2529 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
2530 3a5ba66a Iustin Pop
2531 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
2532 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
2533 c9d443ea Iustin Pop
      changed_mc = True
2534 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
2535 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
2536 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
2537 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
2538 0959c824 Iustin Pop
        if msg:
2539 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
2540 b31c8676 Iustin Pop
2541 c9d443ea Iustin Pop
    if self.op.drained is not None:
2542 c9d443ea Iustin Pop
      node.drained = self.op.drained
2543 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
2544 c9d443ea Iustin Pop
      if self.op.drained == True:
2545 c9d443ea Iustin Pop
        if node.master_candidate:
2546 c9d443ea Iustin Pop
          node.master_candidate = False
2547 c9d443ea Iustin Pop
          changed_mc = True
2548 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
2549 dec0d9da Iustin Pop
          rrc = self.rpc.call_node_demote_from_mc(node.name)
2550 dec0d9da Iustin Pop
          msg = rrc.fail_msg
2551 dec0d9da Iustin Pop
          if msg:
2552 dec0d9da Iustin Pop
            self.LogWarning("Node failed to demote itself: %s" % msg)
2553 c9d443ea Iustin Pop
        if node.offline:
2554 c9d443ea Iustin Pop
          node.offline = False
2555 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
2556 c9d443ea Iustin Pop
2557 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
2558 b31c8676 Iustin Pop
    self.cfg.Update(node)
2559 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
2560 c9d443ea Iustin Pop
    if changed_mc:
2561 3a26773f Iustin Pop
      self.context.ReaddNode(node)
2562 b31c8676 Iustin Pop
2563 b31c8676 Iustin Pop
    return result
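
  # Illustrative example (hypothetical values): offlining a node that is
  # currently a master candidate would return a change list such as
  #   [("offline", "True"),
  #    ("master_candidate", "auto-demotion due to offline")]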
2564 b31c8676 Iustin Pop
2565 b31c8676 Iustin Pop
2566 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
2567 f5118ade Iustin Pop
  """Powercycles a node.
2568 f5118ade Iustin Pop

2569 f5118ade Iustin Pop
  """
2570 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
2571 f5118ade Iustin Pop
  REQ_BGL = False
2572 f5118ade Iustin Pop
2573 f5118ade Iustin Pop
  def CheckArguments(self):
2574 f5118ade Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2575 f5118ade Iustin Pop
    if node_name is None:
2576 f5118ade Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2577 f5118ade Iustin Pop
    self.op.node_name = node_name
2578 f5118ade Iustin Pop
    if node_name == self.cfg.GetMasterNode() and not self.op.force:
2579 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
2580 f5118ade Iustin Pop
                                 " parameter was not set")
2581 f5118ade Iustin Pop
2582 f5118ade Iustin Pop
  def ExpandNames(self):
2583 f5118ade Iustin Pop
    """Locking for PowercycleNode.
2584 f5118ade Iustin Pop

2585 f5118ade Iustin Pop
    This is a last-resort option and shouldn't block on other
2586 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
2587 f5118ade Iustin Pop

2588 f5118ade Iustin Pop
    """
2589 f5118ade Iustin Pop
    self.needed_locks = {}
2590 f5118ade Iustin Pop
2591 f5118ade Iustin Pop
  def CheckPrereq(self):
2592 f5118ade Iustin Pop
    """Check prerequisites.
2593 f5118ade Iustin Pop

2594 f5118ade Iustin Pop
    This LU has no prereqs.
2595 f5118ade Iustin Pop

2596 f5118ade Iustin Pop
    """
2597 f5118ade Iustin Pop
    pass
2598 f5118ade Iustin Pop
2599 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
2600 f5118ade Iustin Pop
    """Reboots a node.
2601 f5118ade Iustin Pop

2602 f5118ade Iustin Pop
    """
2603 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
2604 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
2605 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
2606 f5118ade Iustin Pop
    return result.payload
2607 f5118ade Iustin Pop
2608 f5118ade Iustin Pop
2609 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
2610 a8083063 Iustin Pop
  """Query cluster configuration.
2611 a8083063 Iustin Pop

2612 a8083063 Iustin Pop
  """
2613 a8083063 Iustin Pop
  _OP_REQP = []
2614 642339cf Guido Trotter
  REQ_BGL = False
2615 642339cf Guido Trotter
2616 642339cf Guido Trotter
  def ExpandNames(self):
2617 642339cf Guido Trotter
    self.needed_locks = {}
2618 a8083063 Iustin Pop
2619 a8083063 Iustin Pop
  def CheckPrereq(self):
2620 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
2621 a8083063 Iustin Pop

2622 a8083063 Iustin Pop
    """
2623 a8083063 Iustin Pop
    pass
2624 a8083063 Iustin Pop
2625 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2626 a8083063 Iustin Pop
    """Return cluster config.
2627 a8083063 Iustin Pop

2628 a8083063 Iustin Pop
    """
2629 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
2630 a8083063 Iustin Pop
    result = {
2631 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
2632 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
2633 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
2634 d1a7d66f Guido Trotter
      "os_api_version": max(constants.OS_API_VERSIONS),
2635 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
2636 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
2637 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
2638 469f88e1 Iustin Pop
      "master": cluster.master_node,
2639 066f465d Guido Trotter
      "default_hypervisor": cluster.enabled_hypervisors[0],
2640 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
2641 b8810fec Michael Hanselmann
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
2642 7c4d6c7b Michael Hanselmann
                        for hypervisor_name in cluster.enabled_hypervisors]),
2643 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
2644 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
2645 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
2646 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
2647 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
2648 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
2649 a8083063 Iustin Pop
      }
2650 a8083063 Iustin Pop
2651 a8083063 Iustin Pop
    return result
2652 a8083063 Iustin Pop
2653 a8083063 Iustin Pop
2654 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
2655 ae5849b5 Michael Hanselmann
  """Return configuration values.
2656 a8083063 Iustin Pop

2657 a8083063 Iustin Pop
  """
2658 a8083063 Iustin Pop
  _OP_REQP = []
2659 642339cf Guido Trotter
  REQ_BGL = False
2660 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
2661 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2662 642339cf Guido Trotter
2663 642339cf Guido Trotter
  def ExpandNames(self):
2664 642339cf Guido Trotter
    self.needed_locks = {}
2665 a8083063 Iustin Pop
2666 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2667 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2668 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
2669 ae5849b5 Michael Hanselmann
2670 a8083063 Iustin Pop
  def CheckPrereq(self):
2671 a8083063 Iustin Pop
    """No prerequisites.
2672 a8083063 Iustin Pop

2673 a8083063 Iustin Pop
    """
2674 a8083063 Iustin Pop
    pass
2675 a8083063 Iustin Pop
2676 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2677 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
2678 a8083063 Iustin Pop

2679 a8083063 Iustin Pop
    """
2680 ae5849b5 Michael Hanselmann
    values = []
2681 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
2682 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
2683 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
2684 ae5849b5 Michael Hanselmann
      elif field == "master_node":
2685 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
2686 3ccafd0e Iustin Pop
      elif field == "drain_flag":
2687 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2688 ae5849b5 Michael Hanselmann
      else:
2689 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
2690 3ccafd0e Iustin Pop
      values.append(entry)
2691 ae5849b5 Michael Hanselmann
    return values
2692 a8083063 Iustin Pop
2693 a8083063 Iustin Pop
2694 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
2695 a8083063 Iustin Pop
  """Bring up an instance's disks.
2696 a8083063 Iustin Pop

2697 a8083063 Iustin Pop
  """
2698 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2699 f22a8ba3 Guido Trotter
  REQ_BGL = False
2700 f22a8ba3 Guido Trotter
2701 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2702 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2703 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2704 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2705 f22a8ba3 Guido Trotter
2706 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2707 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2708 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2709 a8083063 Iustin Pop
2710 a8083063 Iustin Pop
  def CheckPrereq(self):
2711 a8083063 Iustin Pop
    """Check prerequisites.
2712 a8083063 Iustin Pop

2713 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2714 a8083063 Iustin Pop

2715 a8083063 Iustin Pop
    """
2716 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2717 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2718 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2719 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
2720 a8083063 Iustin Pop
2721 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2722 a8083063 Iustin Pop
    """Activate the disks.
2723 a8083063 Iustin Pop

2724 a8083063 Iustin Pop
    """
2725 b9bddb6b Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2726 a8083063 Iustin Pop
    if not disks_ok:
2727 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
2728 a8083063 Iustin Pop
2729 a8083063 Iustin Pop
    return disks_info
2730 a8083063 Iustin Pop
2731 a8083063 Iustin Pop
2732 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2733 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
2734 a8083063 Iustin Pop

2735 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
2736 a8083063 Iustin Pop

2737 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
2738 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
2739 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
2740 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
2741 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
2742 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
2743 e4376078 Iustin Pop
      won't result in an error return from the function
2744 e4376078 Iustin Pop
  @return: a tuple (disks_ok, device_info), where disks_ok is a boolean
2745 e4376078 Iustin Pop
      and device_info is a list of (host, instance_visible_name,
2746 e4376078 Iustin Pop
      node_visible_name) tuples mapping node devices to instance devices
2747 a8083063 Iustin Pop

2748 a8083063 Iustin Pop
  """
2749 a8083063 Iustin Pop
  device_info = []
2750 a8083063 Iustin Pop
  disks_ok = True
2751 fdbd668d Iustin Pop
  iname = instance.name
2752 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
2753 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
2754 fdbd668d Iustin Pop
  # before the handshake has occurred, but we do not eliminate it
2755 fdbd668d Iustin Pop
2756 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
2757 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
2758 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
2759 fdbd668d Iustin Pop
  # SyncSource, etc.)
2760 fdbd668d Iustin Pop
2761 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
2762 a8083063 Iustin Pop
  for inst_disk in instance.disks:
2763 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2764 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2765 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2766 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2767 53c14ef1 Iustin Pop
      if msg:
2768 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2769 53c14ef1 Iustin Pop
                           " (is_primary=False, pass=1): %s",
2770 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
2771 fdbd668d Iustin Pop
        if not ignore_secondaries:
2772 a8083063 Iustin Pop
          disks_ok = False
2773 fdbd668d Iustin Pop
2774 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
2775 fdbd668d Iustin Pop
2776 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
2777 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
2778 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2779 fdbd668d Iustin Pop
      if node != instance.primary_node:
2780 fdbd668d Iustin Pop
        continue
2781 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2782 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2783 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2784 53c14ef1 Iustin Pop
      if msg:
2785 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2786 53c14ef1 Iustin Pop
                           " (is_primary=True, pass=2): %s",
2787 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
2788 fdbd668d Iustin Pop
        disks_ok = False
2789 1dff8e07 Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name,
2790 1dff8e07 Iustin Pop
                        result.payload))
2791 a8083063 Iustin Pop
2792 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
2793 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
2794 b352ab5b Iustin Pop
  # improving the logical/physical id handling
2795 b352ab5b Iustin Pop
  for disk in instance.disks:
2796 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
2797 b352ab5b Iustin Pop
2798 a8083063 Iustin Pop
  return disks_ok, device_info
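
# Illustrative example (hypothetical values) of the value returned by
# _AssembleInstanceDisks for a healthy two-disk instance:
#   disks_ok    -> True
#   device_info -> [("node1.example.com", "disk/0", "/dev/drbd0"),
#                   ("node1.example.com", "disk/1", "/dev/drbd1")]
# i.e. one (host, instance_visible_name, node_visible_name) tuple per disk,
# always reported from the primary node's point of view.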
2799 a8083063 Iustin Pop
2800 a8083063 Iustin Pop
2801 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
2802 3ecf6786 Iustin Pop
  """Start the disks of an instance.
2803 3ecf6786 Iustin Pop

2804 3ecf6786 Iustin Pop
  """
2805 7c4d6c7b Michael Hanselmann
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
2806 fe7b0351 Michael Hanselmann
                                       ignore_secondaries=force)
2807 fe7b0351 Michael Hanselmann
  if not disks_ok:
2808 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
2809 fe7b0351 Michael Hanselmann
    if force is not None and not force:
2810 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
2811 86d9d3bb Iustin Pop
                         " secondary node,"
2812 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
2813 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
2814 fe7b0351 Michael Hanselmann
2815 fe7b0351 Michael Hanselmann
2816 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
2817 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2818 a8083063 Iustin Pop

2819 a8083063 Iustin Pop
  """
2820 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2821 f22a8ba3 Guido Trotter
  REQ_BGL = False
2822 f22a8ba3 Guido Trotter
2823 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2824 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2825 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2826 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2827 f22a8ba3 Guido Trotter
2828 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2829 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2830 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2831 a8083063 Iustin Pop
2832 a8083063 Iustin Pop
  def CheckPrereq(self):
2833 a8083063 Iustin Pop
    """Check prerequisites.
2834 a8083063 Iustin Pop

2835 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2836 a8083063 Iustin Pop

2837 a8083063 Iustin Pop
    """
2838 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2839 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2840 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2841 a8083063 Iustin Pop
2842 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2843 a8083063 Iustin Pop
    """Deactivate the disks
2844 a8083063 Iustin Pop

2845 a8083063 Iustin Pop
    """
2846 a8083063 Iustin Pop
    instance = self.instance
2847 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
2848 a8083063 Iustin Pop
2849 a8083063 Iustin Pop
2850 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
2851 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
2852 155d6c75 Guido Trotter

2853 155d6c75 Guido Trotter
  This function checks if an instance is running before calling
2854 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
2855 155d6c75 Guido Trotter

2856 155d6c75 Guido Trotter
  """
2857 aca13712 Iustin Pop
  pnode = instance.primary_node
2858 4c4e4e1e Iustin Pop
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
2859 4c4e4e1e Iustin Pop
  ins_l.Raise("Can't contact node %s" % pnode)
2860 aca13712 Iustin Pop
2861 aca13712 Iustin Pop
  if instance.name in ins_l.payload:
2862 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
2863 155d6c75 Guido Trotter
                             " block devices.")
2864 155d6c75 Guido Trotter
2865 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
2866 a8083063 Iustin Pop
2867 a8083063 Iustin Pop
2868 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2869 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2870 a8083063 Iustin Pop

2871 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2872 a8083063 Iustin Pop

2873 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
2874 a8083063 Iustin Pop
  ignored.
2875 a8083063 Iustin Pop

2876 a8083063 Iustin Pop
  """
2877 cacfd1fd Iustin Pop
  all_result = True
2878 a8083063 Iustin Pop
  for disk in instance.disks:
2879 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2880 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
2881 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
2882 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2883 cacfd1fd Iustin Pop
      if msg:
2884 cacfd1fd Iustin Pop
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
2885 cacfd1fd Iustin Pop
                      disk.iv_name, node, msg)
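        # shutdown failures on secondary nodes always fail the operation;
        # a failure on the primary node is only tolerated when the caller
        # passed ignore_primary=True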
2886 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2887 cacfd1fd Iustin Pop
          all_result = False
2888 cacfd1fd Iustin Pop
  return all_result
2889 a8083063 Iustin Pop
2890 a8083063 Iustin Pop
2891 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
2892 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2893 d4f16fd9 Iustin Pop

2894 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
2895 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2896 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
2897 d4f16fd9 Iustin Pop
  exception.
2898 d4f16fd9 Iustin Pop

2899 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
2900 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2901 e69d05fd Iustin Pop
  @type node: C{str}
2902 e69d05fd Iustin Pop
  @param node: the node to check
2903 e69d05fd Iustin Pop
  @type reason: C{str}
2904 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2905 e69d05fd Iustin Pop
  @type requested: C{int}
2906 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2907 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
2908 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
2909 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2910 e69d05fd Iustin Pop
      we cannot check the node
2911 d4f16fd9 Iustin Pop

2912 d4f16fd9 Iustin Pop
  """
2913 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
2914 4c4e4e1e Iustin Pop
  nodeinfo[node].Raise("Can't get data from node %s" % node, prereq=True)
2915 070e998b Iustin Pop
  free_mem = nodeinfo[node].payload.get('memory_free', None)
2916 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2917 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2918 070e998b Iustin Pop
                               " was '%s'" % (node, free_mem))
2919 d4f16fd9 Iustin Pop
  if requested > free_mem:
2920 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2921 070e998b Iustin Pop
                               " needed %s MiB, available %s MiB" %
2922 070e998b Iustin Pop
                               (node, reason, requested, free_mem))
2923 d4f16fd9 Iustin Pop
2924 d4f16fd9 Iustin Pop
2925 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2926 a8083063 Iustin Pop
  """Starts an instance.
2927 a8083063 Iustin Pop

2928 a8083063 Iustin Pop
  """
2929 a8083063 Iustin Pop
  HPATH = "instance-start"
2930 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2931 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2932 e873317a Guido Trotter
  REQ_BGL = False
2933 e873317a Guido Trotter
2934 e873317a Guido Trotter
  def ExpandNames(self):
2935 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2936 a8083063 Iustin Pop
2937 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2938 a8083063 Iustin Pop
    """Build hooks env.
2939 a8083063 Iustin Pop

2940 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2941 a8083063 Iustin Pop

2942 a8083063 Iustin Pop
    """
2943 a8083063 Iustin Pop
    env = {
2944 a8083063 Iustin Pop
      "FORCE": self.op.force,
2945 a8083063 Iustin Pop
      }
2946 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2947 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2948 a8083063 Iustin Pop
    return env, nl, nl
2949 a8083063 Iustin Pop
2950 a8083063 Iustin Pop
  def CheckPrereq(self):
2951 a8083063 Iustin Pop
    """Check prerequisites.
2952 a8083063 Iustin Pop

2953 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2954 a8083063 Iustin Pop

2955 a8083063 Iustin Pop
    """
2956 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2957 e873317a Guido Trotter
    assert self.instance is not None, \
2958 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2959 a8083063 Iustin Pop
2960 d04aaa2f Iustin Pop
    # extra beparams
2961 d04aaa2f Iustin Pop
    self.beparams = getattr(self.op, "beparams", {})
2962 d04aaa2f Iustin Pop
    if self.beparams:
2963 d04aaa2f Iustin Pop
      if not isinstance(self.beparams, dict):
2964 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
2965 d04aaa2f Iustin Pop
                                   " dict" % (type(self.beparams), ))
2966 d04aaa2f Iustin Pop
      # fill the beparams dict
2967 d04aaa2f Iustin Pop
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
2968 d04aaa2f Iustin Pop
      self.op.beparams = self.beparams
2969 d04aaa2f Iustin Pop
2970 d04aaa2f Iustin Pop
    # extra hvparams
2971 d04aaa2f Iustin Pop
    self.hvparams = getattr(self.op, "hvparams", {})
2972 d04aaa2f Iustin Pop
    if self.hvparams:
2973 d04aaa2f Iustin Pop
      if not isinstance(self.hvparams, dict):
2974 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
2975 d04aaa2f Iustin Pop
                                   " dict" % (type(self.hvparams), ))
2976 d04aaa2f Iustin Pop
2977 d04aaa2f Iustin Pop
      # check hypervisor parameter syntax (locally)
2978 d04aaa2f Iustin Pop
      cluster = self.cfg.GetClusterInfo()
2979 d04aaa2f Iustin Pop
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
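      # the effective hypervisor parameters are built up in three layers:
      # cluster-wide defaults for this hypervisor, the instance's own
      # hvparams, and finally the one-off overrides supplied in this opcode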
2980 abe609b2 Guido Trotter
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
2981 d04aaa2f Iustin Pop
                                    instance.hvparams)
2982 d04aaa2f Iustin Pop
      filled_hvp.update(self.hvparams)
2983 d04aaa2f Iustin Pop
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
2984 d04aaa2f Iustin Pop
      hv_type.CheckParameterSyntax(filled_hvp)
2985 d04aaa2f Iustin Pop
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
2986 d04aaa2f Iustin Pop
      self.op.hvparams = self.hvparams
2987 d04aaa2f Iustin Pop
2988 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
2989 7527a8a4 Iustin Pop
2990 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
2991 5bbd3f7f Michael Hanselmann
    # check bridges existence
2992 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
2993 a8083063 Iustin Pop
2994 f1926756 Guido Trotter
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2995 f1926756 Guido Trotter
                                              instance.name,
2996 f1926756 Guido Trotter
                                              instance.hypervisor)
2997 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
2998 4c4e4e1e Iustin Pop
                      prereq=True)
2999 7ad1af4a Iustin Pop
    if not remote_info.payload: # not running already
3000 f1926756 Guido Trotter
      _CheckNodeFreeMemory(self, instance.primary_node,
3001 f1926756 Guido Trotter
                           "starting instance %s" % instance.name,
3002 f1926756 Guido Trotter
                           bep[constants.BE_MEMORY], instance.hypervisor)
3003 d4f16fd9 Iustin Pop
3004 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3005 a8083063 Iustin Pop
    """Start the instance.
3006 a8083063 Iustin Pop

3007 a8083063 Iustin Pop
    """
3008 a8083063 Iustin Pop
    instance = self.instance
3009 a8083063 Iustin Pop
    force = self.op.force
3010 a8083063 Iustin Pop
3011 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
3012 fe482621 Iustin Pop
3013 a8083063 Iustin Pop
    node_current = instance.primary_node
3014 a8083063 Iustin Pop
3015 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
3016 a8083063 Iustin Pop
3017 d04aaa2f Iustin Pop
    result = self.rpc.call_instance_start(node_current, instance,
3018 d04aaa2f Iustin Pop
                                          self.hvparams, self.beparams)
3019 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3020 dd279568 Iustin Pop
    if msg:
3021 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3022 dd279568 Iustin Pop
      raise errors.OpExecError("Could not start instance: %s" % msg)
3023 a8083063 Iustin Pop
3024 a8083063 Iustin Pop
3025 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
3026 bf6929a2 Alexander Schreiber
  """Reboot an instance.
3027 bf6929a2 Alexander Schreiber

3028 bf6929a2 Alexander Schreiber
  """
3029 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
3030 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
3031 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
3032 e873317a Guido Trotter
  REQ_BGL = False
3033 e873317a Guido Trotter
3034 e873317a Guido Trotter
  def ExpandNames(self):
3035 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
3036 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
3037 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
3038 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
3039 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
3040 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
3041 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
3042 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3043 bf6929a2 Alexander Schreiber
3044 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
3045 bf6929a2 Alexander Schreiber
    """Build hooks env.
3046 bf6929a2 Alexander Schreiber

3047 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
3048 bf6929a2 Alexander Schreiber

3049 bf6929a2 Alexander Schreiber
    """
3050 bf6929a2 Alexander Schreiber
    env = {
3051 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
3052 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
3053 bf6929a2 Alexander Schreiber
      }
3054 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3055 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3056 bf6929a2 Alexander Schreiber
    return env, nl, nl
3057 bf6929a2 Alexander Schreiber
3058 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
3059 bf6929a2 Alexander Schreiber
    """Check prerequisites.
3060 bf6929a2 Alexander Schreiber

3061 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
3062 bf6929a2 Alexander Schreiber

3063 bf6929a2 Alexander Schreiber
    """
3064 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3065 e873317a Guido Trotter
    assert self.instance is not None, \
3066 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3067 bf6929a2 Alexander Schreiber
3068 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3069 7527a8a4 Iustin Pop
3070 5bbd3f7f Michael Hanselmann
    # check bridges existence
3071 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
3072 bf6929a2 Alexander Schreiber
3073 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
3074 bf6929a2 Alexander Schreiber
    """Reboot the instance.
3075 bf6929a2 Alexander Schreiber

3076 bf6929a2 Alexander Schreiber
    """
3077 bf6929a2 Alexander Schreiber
    instance = self.instance
3078 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
3079 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
3080 bf6929a2 Alexander Schreiber
3081 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
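    # soft and hard reboots are handed to the hypervisor on the primary
    # node; a "full" reboot is emulated below as an instance shutdown
    # followed by a disk restart and a fresh instance start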
3082 bf6929a2 Alexander Schreiber
3083 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
3084 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
3085 ae48ac32 Iustin Pop
      for disk in instance.disks:
3086 ae48ac32 Iustin Pop
        self.cfg.SetDiskID(disk, node_current)
3087 781de953 Iustin Pop
      result = self.rpc.call_instance_reboot(node_current, instance,
3088 07813a9e Iustin Pop
                                             reboot_type)
3089 4c4e4e1e Iustin Pop
      result.Raise("Could not reboot instance")
3090 bf6929a2 Alexander Schreiber
    else:
3091 1fae010f Iustin Pop
      result = self.rpc.call_instance_shutdown(node_current, instance)
3092 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance for full reboot")
3093 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3094 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
3095 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(node_current, instance, None, None)
3096 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3097 dd279568 Iustin Pop
      if msg:
3098 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3099 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance for"
3100 dd279568 Iustin Pop
                                 " full reboot: %s" % msg)
3101 bf6929a2 Alexander Schreiber
3102 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
3103 bf6929a2 Alexander Schreiber
3104 bf6929a2 Alexander Schreiber
3105 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
3106 a8083063 Iustin Pop
  """Shutdown an instance.
3107 a8083063 Iustin Pop

3108 a8083063 Iustin Pop
  """
3109 a8083063 Iustin Pop
  HPATH = "instance-stop"
3110 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3111 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3112 e873317a Guido Trotter
  REQ_BGL = False
3113 e873317a Guido Trotter
3114 e873317a Guido Trotter
  def ExpandNames(self):
3115 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3116 a8083063 Iustin Pop
3117 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3118 a8083063 Iustin Pop
    """Build hooks env.
3119 a8083063 Iustin Pop

3120 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3121 a8083063 Iustin Pop

3122 a8083063 Iustin Pop
    """
3123 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3124 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3125 a8083063 Iustin Pop
    return env, nl, nl
3126 a8083063 Iustin Pop
3127 a8083063 Iustin Pop
  def CheckPrereq(self):
3128 a8083063 Iustin Pop
    """Check prerequisites.
3129 a8083063 Iustin Pop

3130 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3131 a8083063 Iustin Pop

3132 a8083063 Iustin Pop
    """
3133 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3134 e873317a Guido Trotter
    assert self.instance is not None, \
3135 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3136 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3137 a8083063 Iustin Pop
3138 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3139 a8083063 Iustin Pop
    """Shutdown the instance.
3140 a8083063 Iustin Pop

3141 a8083063 Iustin Pop
    """
3142 a8083063 Iustin Pop
    instance = self.instance
3143 a8083063 Iustin Pop
    node_current = instance.primary_node
3144 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
3145 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(node_current, instance)
3146 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3147 1fae010f Iustin Pop
    if msg:
3148 1fae010f Iustin Pop
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
3149 a8083063 Iustin Pop
3150 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
3151 a8083063 Iustin Pop
3152 a8083063 Iustin Pop
3153 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
3154 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
3155 fe7b0351 Michael Hanselmann

3156 fe7b0351 Michael Hanselmann
  """
3157 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
3158 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
3159 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
3160 4e0b4d2d Guido Trotter
  REQ_BGL = False
3161 4e0b4d2d Guido Trotter
3162 4e0b4d2d Guido Trotter
  def ExpandNames(self):
3163 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
3164 fe7b0351 Michael Hanselmann
3165 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
3166 fe7b0351 Michael Hanselmann
    """Build hooks env.
3167 fe7b0351 Michael Hanselmann

3168 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
3169 fe7b0351 Michael Hanselmann

3170 fe7b0351 Michael Hanselmann
    """
3171 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3172 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3173 fe7b0351 Michael Hanselmann
    return env, nl, nl
3174 fe7b0351 Michael Hanselmann
3175 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
3176 fe7b0351 Michael Hanselmann
    """Check prerequisites.
3177 fe7b0351 Michael Hanselmann

3178 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
3179 fe7b0351 Michael Hanselmann

3180 fe7b0351 Michael Hanselmann
    """
3181 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3182 4e0b4d2d Guido Trotter
    assert instance is not None, \
3183 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3184 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3185 4e0b4d2d Guido Trotter
3186 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
3187 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
3188 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3189 0d68c45d Iustin Pop
    if instance.admin_up:
3190 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3191 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3192 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3193 72737a7f Iustin Pop
                                              instance.name,
3194 72737a7f Iustin Pop
                                              instance.hypervisor)
3195 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3196 4c4e4e1e Iustin Pop
                      prereq=True)
3197 7ad1af4a Iustin Pop
    if remote_info.payload:
3198 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3199 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
3200 3ecf6786 Iustin Pop
                                  instance.primary_node))
3201 d0834de3 Michael Hanselmann
3202 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
3203 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3204 d0834de3 Michael Hanselmann
      # OS verification
3205 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
3206 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
3207 d0834de3 Michael Hanselmann
      if pnode is None:
3208 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
3209 3ecf6786 Iustin Pop
                                   self.op.pnode)
3210 781de953 Iustin Pop
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
3211 4c4e4e1e Iustin Pop
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
3212 4c4e4e1e Iustin Pop
                   (self.op.os_type, pnode.name), prereq=True)
3213 d0834de3 Michael Hanselmann
3214 fe7b0351 Michael Hanselmann
    self.instance = instance
3215 fe7b0351 Michael Hanselmann
3216 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
3217 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
3218 fe7b0351 Michael Hanselmann

3219 fe7b0351 Michael Hanselmann
    """
3220 fe7b0351 Michael Hanselmann
    inst = self.instance
3221 fe7b0351 Michael Hanselmann
3222 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3223 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
3224 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
3225 97abc79f Iustin Pop
      self.cfg.Update(inst)
3226 d0834de3 Michael Hanselmann
3227 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
3228 fe7b0351 Michael Hanselmann
    try:
3229 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
3230 e557bae9 Guido Trotter
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
3231 4c4e4e1e Iustin Pop
      result.Raise("Could not install OS for instance %s on node %s" %
3232 4c4e4e1e Iustin Pop
                   (inst.name, inst.primary_node))
3233 fe7b0351 Michael Hanselmann
    finally:
3234 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
3235 fe7b0351 Michael Hanselmann
3236 fe7b0351 Michael Hanselmann
3237 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
3238 decd5f45 Iustin Pop
  """Rename an instance.
3239 decd5f45 Iustin Pop

3240 decd5f45 Iustin Pop
  """
3241 decd5f45 Iustin Pop
  HPATH = "instance-rename"
3242 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3243 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
3244 decd5f45 Iustin Pop
3245 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
3246 decd5f45 Iustin Pop
    """Build hooks env.
3247 decd5f45 Iustin Pop

3248 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3249 decd5f45 Iustin Pop

3250 decd5f45 Iustin Pop
    """
3251 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3252 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
3253 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3254 decd5f45 Iustin Pop
    return env, nl, nl
3255 decd5f45 Iustin Pop
3256 decd5f45 Iustin Pop
  def CheckPrereq(self):
3257 decd5f45 Iustin Pop
    """Check prerequisites.
3258 decd5f45 Iustin Pop

3259 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
3260 decd5f45 Iustin Pop

3261 decd5f45 Iustin Pop
    """
3262 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3263 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3264 decd5f45 Iustin Pop
    if instance is None:
3265 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3266 decd5f45 Iustin Pop
                                 self.op.instance_name)
3267 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3268 7527a8a4 Iustin Pop
3269 0d68c45d Iustin Pop
    if instance.admin_up:
3270 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3271 decd5f45 Iustin Pop
                                 self.op.instance_name)
3272 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3273 72737a7f Iustin Pop
                                              instance.name,
3274 72737a7f Iustin Pop
                                              instance.hypervisor)
3275 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3276 4c4e4e1e Iustin Pop
                      prereq=True)
3277 7ad1af4a Iustin Pop
    if remote_info.payload:
3278 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3279 decd5f45 Iustin Pop
                                 (self.op.instance_name,
3280 decd5f45 Iustin Pop
                                  instance.primary_node))
3281 decd5f45 Iustin Pop
    self.instance = instance
3282 decd5f45 Iustin Pop
3283 decd5f45 Iustin Pop
    # new name verification
3284 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
3285 decd5f45 Iustin Pop
3286 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
3287 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
3288 7bde3275 Guido Trotter
    if new_name in instance_list:
3289 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3290 c09f363f Manuel Franceschini
                                 new_name)
3291 7bde3275 Guido Trotter
3292 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
3293 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
3294 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3295 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
3296 decd5f45 Iustin Pop
3297 decd5f45 Iustin Pop
3298 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
3299 decd5f45 Iustin Pop
    """Reinstall the instance.
3300 decd5f45 Iustin Pop

3301 decd5f45 Iustin Pop
    """
3302 decd5f45 Iustin Pop
    inst = self.instance
3303 decd5f45 Iustin Pop
    old_name = inst.name
3304 decd5f45 Iustin Pop
3305 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
3306 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3307 b23c4333 Manuel Franceschini
3308 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
3309 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
3310 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
3311 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
3312 decd5f45 Iustin Pop
3313 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
3314 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
3315 decd5f45 Iustin Pop
3316 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
3317 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3318 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
3319 72737a7f Iustin Pop
                                                     old_file_storage_dir,
3320 72737a7f Iustin Pop
                                                     new_file_storage_dir)
3321 4c4e4e1e Iustin Pop
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
3322 4c4e4e1e Iustin Pop
                   " (but the instance has been renamed in Ganeti)" %
3323 4c4e4e1e Iustin Pop
                   (inst.primary_node, old_file_storage_dir,
3324 4c4e4e1e Iustin Pop
                    new_file_storage_dir))
3325 b23c4333 Manuel Franceschini
3326 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
3327 decd5f45 Iustin Pop
    try:
3328 781de953 Iustin Pop
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
3329 781de953 Iustin Pop
                                                 old_name)
3330 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3331 96841384 Iustin Pop
      if msg:
3332 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
3333 96841384 Iustin Pop
               " (but the instance has been renamed in Ganeti): %s" %
3334 96841384 Iustin Pop
               (inst.name, inst.primary_node, msg))
3335 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
3336 decd5f45 Iustin Pop
    finally:
3337 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
3338 decd5f45 Iustin Pop
3339 decd5f45 Iustin Pop
3340 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
3341 a8083063 Iustin Pop
  """Remove an instance.
3342 a8083063 Iustin Pop

3343 a8083063 Iustin Pop
  """
3344 a8083063 Iustin Pop
  HPATH = "instance-remove"
3345 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3346 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
3347 cf472233 Guido Trotter
  REQ_BGL = False
3348 cf472233 Guido Trotter
3349 cf472233 Guido Trotter
  def ExpandNames(self):
3350 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
3351 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3352 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3353 cf472233 Guido Trotter
3354 cf472233 Guido Trotter
  def DeclareLocks(self, level):
3355 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
3356 cf472233 Guido Trotter
      self._LockInstancesNodes()
3357 a8083063 Iustin Pop
3358 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3359 a8083063 Iustin Pop
    """Build hooks env.
3360 a8083063 Iustin Pop

3361 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3362 a8083063 Iustin Pop

3363 a8083063 Iustin Pop
    """
3364 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3365 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
3366 a8083063 Iustin Pop
    return env, nl, nl
3367 a8083063 Iustin Pop
3368 a8083063 Iustin Pop
  def CheckPrereq(self):
3369 a8083063 Iustin Pop
    """Check prerequisites.
3370 a8083063 Iustin Pop

3371 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3372 a8083063 Iustin Pop

3373 a8083063 Iustin Pop
    """
3374 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3375 cf472233 Guido Trotter
    assert self.instance is not None, \
3376 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3377 a8083063 Iustin Pop
3378 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3379 a8083063 Iustin Pop
    """Remove the instance.
3380 a8083063 Iustin Pop

3381 a8083063 Iustin Pop
    """
3382 a8083063 Iustin Pop
    instance = self.instance
3383 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
3384 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
3385 a8083063 Iustin Pop
3386 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
3387 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3388 1fae010f Iustin Pop
    if msg:
3389 1d67656e Iustin Pop
      if self.op.ignore_failures:
3390 1fae010f Iustin Pop
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
3391 1d67656e Iustin Pop
      else:
3392 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
3393 1fae010f Iustin Pop
                                 " node %s: %s" %
3394 1fae010f Iustin Pop
                                 (instance.name, instance.primary_node, msg))
3395 a8083063 Iustin Pop
3396 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
3397 a8083063 Iustin Pop
3398 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
3399 1d67656e Iustin Pop
      if self.op.ignore_failures:
3400 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
3401 1d67656e Iustin Pop
      else:
3402 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
3403 a8083063 Iustin Pop
3404 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
3405 a8083063 Iustin Pop
3406 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
3407 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
3408 a8083063 Iustin Pop
3409 a8083063 Iustin Pop
3410 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
3411 a8083063 Iustin Pop
  """Logical unit for querying instances.
3412 a8083063 Iustin Pop

3413 a8083063 Iustin Pop
  """
3414 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
3415 7eb9d8f7 Guido Trotter
  REQ_BGL = False
3416 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
3417 5b460366 Iustin Pop
                                    "admin_state",
3418 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
3419 638c6349 Guido Trotter
                                    "nic_mode", "nic_link",
3420 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
3421 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
3422 8aec325c Iustin Pop
                                    r"(disk)\.(size)/([0-9]+)",
3423 8aec325c Iustin Pop
                                    r"(disk)\.(sizes)", "disk_usage",
3424 638c6349 Guido Trotter
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
3425 638c6349 Guido Trotter
                                    r"(nic)\.(bridge)/([0-9]+)",
3426 638c6349 Guido Trotter
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
3427 8aec325c Iustin Pop
                                    r"(disk|nic)\.(count)",
3428 a2d2e1a7 Iustin Pop
                                    "serial_no", "hypervisor", "hvparams",] +
3429 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
3430 a2d2e1a7 Iustin Pop
                                   for name in constants.HVS_PARAMETERS] +
3431 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
3432 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
3433 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
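  # the parenthesized entries above are regular expressions; for example
  # (purely illustrative), "disk.size/0" or "nic.mac/1" are accepted field
  # names and are handled generically via st_match in Exec below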
3434 31bf511f Iustin Pop
3435 a8083063 Iustin Pop
3436 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
3437 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3438 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3439 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
3440 a8083063 Iustin Pop
3441 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
3442 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
3443 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
3444 7eb9d8f7 Guido Trotter
3445 57a2fb91 Iustin Pop
    if self.op.names:
3446 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
3447 7eb9d8f7 Guido Trotter
    else:
3448 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
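    # live (dynamic) data is only needed when a non-static field was
    # requested; locking, in turn, is only used when live data is needed
    # and the caller explicitly asked for it via use_locking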
3449 7eb9d8f7 Guido Trotter
3450 ec79568d Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
3451 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
3452 57a2fb91 Iustin Pop
    if self.do_locking:
3453 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
3454 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
3455 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3456 7eb9d8f7 Guido Trotter
3457 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
3458 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
3459 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
3460 7eb9d8f7 Guido Trotter
3461 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
3462 7eb9d8f7 Guido Trotter
    """Check prerequisites.
3463 7eb9d8f7 Guido Trotter

3464 7eb9d8f7 Guido Trotter
    """
3465 57a2fb91 Iustin Pop
    pass
3466 069dcc86 Iustin Pop
3467 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3468 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
3469 a8083063 Iustin Pop

3470 a8083063 Iustin Pop
    """
3471 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
3472 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
3473 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
3474 a7f5dc98 Iustin Pop
      if self.do_locking:
3475 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
3476 a7f5dc98 Iustin Pop
      else:
3477 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
3478 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
3479 57a2fb91 Iustin Pop
    else:
3480 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
3481 a7f5dc98 Iustin Pop
      if self.do_locking:
3482 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
3483 a7f5dc98 Iustin Pop
      else:
3484 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
3485 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
3486 a7f5dc98 Iustin Pop
      if missing:
3487 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
3488 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
3489 a7f5dc98 Iustin Pop
      instance_names = self.wanted
3490 c1f1cbb2 Iustin Pop
3491 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
3492 a8083063 Iustin Pop
3493 a8083063 Iustin Pop
    # begin data gathering
3494 a8083063 Iustin Pop
3495 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
3496 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
3497 a8083063 Iustin Pop
3498 a8083063 Iustin Pop
    bad_nodes = []
3499 cbfc4681 Iustin Pop
    off_nodes = []
3500 ec79568d Iustin Pop
    if self.do_node_query:
3501 a8083063 Iustin Pop
      live_data = {}
3502 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
3503 a8083063 Iustin Pop
      for name in nodes:
3504 a8083063 Iustin Pop
        result = node_data[name]
3505 cbfc4681 Iustin Pop
        if result.offline:
3506 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
3507 cbfc4681 Iustin Pop
          off_nodes.append(name)
3508 4c4e4e1e Iustin Pop
        if result.failed or result.fail_msg:
3509 a8083063 Iustin Pop
          bad_nodes.append(name)
3510 781de953 Iustin Pop
        else:
3511 2fa74ef4 Iustin Pop
          if result.payload:
3512 2fa74ef4 Iustin Pop
            live_data.update(result.payload)
3513 2fa74ef4 Iustin Pop
          # else no instance is alive
3514 a8083063 Iustin Pop
    else:
3515 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
3516 a8083063 Iustin Pop
3517 a8083063 Iustin Pop
    # end data gathering
3518 a8083063 Iustin Pop
3519 5018a335 Iustin Pop
    HVPREFIX = "hv/"
3520 338e51e8 Iustin Pop
    BEPREFIX = "be/"
3521 a8083063 Iustin Pop
    output = []
3522 638c6349 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
3523 a8083063 Iustin Pop
    for instance in instance_list:
3524 a8083063 Iustin Pop
      iout = []
3525 638c6349 Guido Trotter
      i_hv = cluster.FillHV(instance)
3526 638c6349 Guido Trotter
      i_be = cluster.FillBE(instance)
3527 638c6349 Guido Trotter
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
3528 638c6349 Guido Trotter
                                 nic.nicparams) for nic in instance.nics]
3529 a8083063 Iustin Pop
      for field in self.op.output_fields:
3530 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
3531 a8083063 Iustin Pop
        if field == "name":
3532 a8083063 Iustin Pop
          val = instance.name
3533 a8083063 Iustin Pop
        elif field == "os":
3534 a8083063 Iustin Pop
          val = instance.os
3535 a8083063 Iustin Pop
        elif field == "pnode":
3536 a8083063 Iustin Pop
          val = instance.primary_node
3537 a8083063 Iustin Pop
        elif field == "snodes":
3538 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
3539 a8083063 Iustin Pop
        elif field == "admin_state":
3540 0d68c45d Iustin Pop
          val = instance.admin_up
3541 a8083063 Iustin Pop
        elif field == "oper_state":
3542 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
3543 8a23d2d3 Iustin Pop
            val = None
3544 a8083063 Iustin Pop
          else:
3545 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
3546 d8052456 Iustin Pop
        elif field == "status":
3547 cbfc4681 Iustin Pop
          if instance.primary_node in off_nodes:
3548 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
3549 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
3550 d8052456 Iustin Pop
            val = "ERROR_nodedown"
3551 d8052456 Iustin Pop
          else:
3552 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
3553 d8052456 Iustin Pop
            if running:
3554 0d68c45d Iustin Pop
              if instance.admin_up:
3555 d8052456 Iustin Pop
                val = "running"
3556 d8052456 Iustin Pop
              else:
3557 d8052456 Iustin Pop
                val = "ERROR_up"
3558 d8052456 Iustin Pop
            else:
3559 0d68c45d Iustin Pop
              if instance.admin_up:
3560 d8052456 Iustin Pop
                val = "ERROR_down"
3561 d8052456 Iustin Pop
              else:
3562 d8052456 Iustin Pop
                val = "ADMIN_down"
3563 a8083063 Iustin Pop
        elif field == "oper_ram":
3564 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
3565 8a23d2d3 Iustin Pop
            val = None
3566 a8083063 Iustin Pop
          elif instance.name in live_data:
3567 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
3568 a8083063 Iustin Pop
          else:
3569 a8083063 Iustin Pop
            val = "-"
3570 c1ce76bb Iustin Pop
        elif field == "vcpus":
3571 c1ce76bb Iustin Pop
          val = i_be[constants.BE_VCPUS]
3572 a8083063 Iustin Pop
        elif field == "disk_template":
3573 a8083063 Iustin Pop
          val = instance.disk_template
3574 a8083063 Iustin Pop
        elif field == "ip":
3575 39a02558 Guido Trotter
          if instance.nics:
3576 39a02558 Guido Trotter
            val = instance.nics[0].ip
3577 39a02558 Guido Trotter
          else:
3578 39a02558 Guido Trotter
            val = None
3579 638c6349 Guido Trotter
        elif field == "nic_mode":
3580 638c6349 Guido Trotter
          if instance.nics:
3581 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_MODE]
3582 638c6349 Guido Trotter
          else:
3583 638c6349 Guido Trotter
            val = None
3584 638c6349 Guido Trotter
        elif field == "nic_link":
3585 39a02558 Guido Trotter
          if instance.nics:
3586 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
3587 638c6349 Guido Trotter
          else:
3588 638c6349 Guido Trotter
            val = None
3589 638c6349 Guido Trotter
        elif field == "bridge":
3590 638c6349 Guido Trotter
          if (instance.nics and
3591 638c6349 Guido Trotter
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
3592 638c6349 Guido Trotter
            val = i_nicp[0][constants.NIC_LINK]
3593 39a02558 Guido Trotter
          else:
3594 39a02558 Guido Trotter
            val = None
3595 a8083063 Iustin Pop
        elif field == "mac":
3596 39a02558 Guido Trotter
          if instance.nics:
3597 39a02558 Guido Trotter
            val = instance.nics[0].mac
3598 39a02558 Guido Trotter
          else:
3599 39a02558 Guido Trotter
            val = None
3600 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
3601 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
3602 ad24e046 Iustin Pop
          try:
3603 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
3604 ad24e046 Iustin Pop
          except errors.OpPrereqError:
3605 8a23d2d3 Iustin Pop
            val = None
3606 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
3607 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
3608 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
3609 130a6a6f Iustin Pop
        elif field == "tags":
3610 130a6a6f Iustin Pop
          val = list(instance.GetTags())
3611 38d7239a Iustin Pop
        elif field == "serial_no":
3612 38d7239a Iustin Pop
          val = instance.serial_no
3613 5018a335 Iustin Pop
        elif field == "network_port":
3614 5018a335 Iustin Pop
          val = instance.network_port
3615 338e51e8 Iustin Pop
        elif field == "hypervisor":
3616 338e51e8 Iustin Pop
          val = instance.hypervisor
3617 338e51e8 Iustin Pop
        elif field == "hvparams":
3618 338e51e8 Iustin Pop
          val = i_hv
3619 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
3620 5018a335 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
3621 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
3622 338e51e8 Iustin Pop
        elif field == "beparams":
3623 338e51e8 Iustin Pop
          val = i_be
3624 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
3625 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
3626 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
3627 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
3628 71c1af58 Iustin Pop
          # matches a variable list
3629 71c1af58 Iustin Pop
          st_groups = st_match.groups()
3630 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
3631 71c1af58 Iustin Pop
            if st_groups[1] == "count":
3632 71c1af58 Iustin Pop
              val = len(instance.disks)
3633 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
3634 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
3635 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
3636 3e0cea06 Iustin Pop
              try:
3637 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
3638 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
3639 71c1af58 Iustin Pop
                val = None
3640 71c1af58 Iustin Pop
            else:
3641 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
3642 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
3643 71c1af58 Iustin Pop
            if st_groups[1] == "count":
3644 71c1af58 Iustin Pop
              val = len(instance.nics)
3645 41a776da Iustin Pop
            elif st_groups[1] == "macs":
3646 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
3647 41a776da Iustin Pop
            elif st_groups[1] == "ips":
3648 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
3649 638c6349 Guido Trotter
            elif st_groups[1] == "modes":
3650 638c6349 Guido Trotter
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
3651 638c6349 Guido Trotter
            elif st_groups[1] == "links":
3652 638c6349 Guido Trotter
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
3653 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
3654 638c6349 Guido Trotter
              val = []
3655 638c6349 Guido Trotter
              for nicp in i_nicp:
3656 638c6349 Guido Trotter
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3657 638c6349 Guido Trotter
                  val.append(nicp[constants.NIC_LINK])
3658 638c6349 Guido Trotter
                else:
3659 638c6349 Guido Trotter
                  val.append(None)
3660 71c1af58 Iustin Pop
            else:
3661 71c1af58 Iustin Pop
              # index-based item
3662 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
3663 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
3664 71c1af58 Iustin Pop
                val = None
3665 71c1af58 Iustin Pop
              else:
3666 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
3667 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
3668 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
3669 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
3670 638c6349 Guido Trotter
                elif st_groups[1] == "mode":
3671 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_MODE]
3672 638c6349 Guido Trotter
                elif st_groups[1] == "link":
3673 638c6349 Guido Trotter
                  val = i_nicp[nic_idx][constants.NIC_LINK]
3674 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
3675 638c6349 Guido Trotter
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
3676 638c6349 Guido Trotter
                  if nic_mode == constants.NIC_MODE_BRIDGED:
3677 638c6349 Guido Trotter
                    val = i_nicp[nic_idx][constants.NIC_LINK]
3678 638c6349 Guido Trotter
                  else:
3679 638c6349 Guido Trotter
                    val = None
3680 71c1af58 Iustin Pop
                else:
3681 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
3682 71c1af58 Iustin Pop
          else:
3683 c1ce76bb Iustin Pop
            assert False, ("Declared but unhandled variable parameter '%s'" %
3684 c1ce76bb Iustin Pop
                           field)
3685 a8083063 Iustin Pop
        else:
3686 c1ce76bb Iustin Pop
          assert False, "Declared but unhandled parameter '%s'" % field
3687 a8083063 Iustin Pop
        iout.append(val)
3688 a8083063 Iustin Pop
      output.append(iout)
3689 a8083063 Iustin Pop
3690 a8083063 Iustin Pop
    return output
3691 a8083063 Iustin Pop
3692 a8083063 Iustin Pop
3693 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
3694 a8083063 Iustin Pop
  """Failover an instance.
3695 a8083063 Iustin Pop

3696 a8083063 Iustin Pop
  """
3697 a8083063 Iustin Pop
  HPATH = "instance-failover"
3698 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3699 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
3700 c9e5c064 Guido Trotter
  REQ_BGL = False
3701 c9e5c064 Guido Trotter
3702 c9e5c064 Guido Trotter
  def ExpandNames(self):
3703 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
3704 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3705 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3706 c9e5c064 Guido Trotter
3707 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
3708 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
3709 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
3710 a8083063 Iustin Pop
3711 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3712 a8083063 Iustin Pop
    """Build hooks env.
3713 a8083063 Iustin Pop

3714 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3715 a8083063 Iustin Pop

3716 a8083063 Iustin Pop
    """
3717 a8083063 Iustin Pop
    env = {
3718 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
3719 a8083063 Iustin Pop
      }
3720 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3721 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3722 a8083063 Iustin Pop
    return env, nl, nl
3723 a8083063 Iustin Pop
3724 a8083063 Iustin Pop
  def CheckPrereq(self):
3725 a8083063 Iustin Pop
    """Check prerequisites.
3726 a8083063 Iustin Pop

3727 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3728 a8083063 Iustin Pop

3729 a8083063 Iustin Pop
    """
3730 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3731 c9e5c064 Guido Trotter
    assert self.instance is not None, \
3732 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3733 a8083063 Iustin Pop
3734 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
3735 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3736 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
3737 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
3738 2a710df1 Michael Hanselmann
3739 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
3740 2a710df1 Michael Hanselmann
    if not secondary_nodes:
3741 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
3742 abdf0113 Iustin Pop
                                   "a mirrored disk template")
3743 2a710df1 Michael Hanselmann
3744 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
3745 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, target_node)
3746 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, target_node)
3747 d27776f0 Iustin Pop
    if instance.admin_up:
3748 d27776f0 Iustin Pop
      # check memory requirements on the secondary node
3749 d27776f0 Iustin Pop
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
3750 d27776f0 Iustin Pop
                           instance.name, bep[constants.BE_MEMORY],
3751 d27776f0 Iustin Pop
                           instance.hypervisor)
3752 d27776f0 Iustin Pop
    else:
3753 d27776f0 Iustin Pop
      self.LogInfo("Not checking memory on the secondary node as"
3754 d27776f0 Iustin Pop
                   " instance will not be started")
3755 3a7c308e Guido Trotter
3756 a8083063 Iustin Pop
    # check bridge existence
3757 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
3758 a8083063 Iustin Pop
3759 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3760 a8083063 Iustin Pop
    """Failover an instance.
3761 a8083063 Iustin Pop

3762 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
3763 a8083063 Iustin Pop
    starting it on the secondary.
3764 a8083063 Iustin Pop

3765 a8083063 Iustin Pop
    """
3766 a8083063 Iustin Pop
    instance = self.instance
3767 a8083063 Iustin Pop
3768 a8083063 Iustin Pop
    source_node = instance.primary_node
3769 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
3770 a8083063 Iustin Pop
3771 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
3772 a8083063 Iustin Pop
    for dev in instance.disks:
3773 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
3774 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
3775 0d68c45d Iustin Pop
        if instance.admin_up and not self.op.ignore_consistency:
3776 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
3777 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
3778 a8083063 Iustin Pop
3779 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
3780 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
3781 9a4f63d1 Iustin Pop
                 instance.name, source_node)
3782 a8083063 Iustin Pop
3783 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(source_node, instance)
3784 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3785 1fae010f Iustin Pop
    if msg:
3786 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
3787 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
3788 1fae010f Iustin Pop
                             " Proceeding anyway. Please make sure node"
3789 1fae010f Iustin Pop
                             " %s is down. Error details: %s",
3790 1fae010f Iustin Pop
                             instance.name, source_node, source_node, msg)
3791 24a40d57 Iustin Pop
      else:
3792 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
3793 1fae010f Iustin Pop
                                 " node %s: %s" %
3794 1fae010f Iustin Pop
                                 (instance.name, source_node, msg))
3795 a8083063 Iustin Pop
3796 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
3797 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3798 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
3799 a8083063 Iustin Pop
3800 a8083063 Iustin Pop
    instance.primary_node = target_node
3801 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
3802 b6102dab Guido Trotter
    self.cfg.Update(instance)
3803 a8083063 Iustin Pop
3804 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
3805 0d68c45d Iustin Pop
    if instance.admin_up:
3806 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
3807 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
3808 9a4f63d1 Iustin Pop
                   instance.name, target_node)
3809 12a0cfbe Guido Trotter
3810 7c4d6c7b Michael Hanselmann
      disks_ok, _ = _AssembleInstanceDisks(self, instance,
3811 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
3812 12a0cfbe Guido Trotter
      if not disks_ok:
3813 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3814 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
3815 a8083063 Iustin Pop
3816 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
3817 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
3818 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3819 dd279568 Iustin Pop
      if msg:
3820 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3821 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
3822 dd279568 Iustin Pop
                                 (instance.name, target_node, msg))
3823 a8083063 Iustin Pop
3824 a8083063 Iustin Pop
3825 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
3826 53c776b5 Iustin Pop
  """Migrate an instance.
3827 53c776b5 Iustin Pop

3828 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
3829 53c776b5 Iustin Pop
  which is done with shutdown.
3830 53c776b5 Iustin Pop

3831 53c776b5 Iustin Pop
  """
3832 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
3833 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3834 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
3835 53c776b5 Iustin Pop
3836 53c776b5 Iustin Pop
  REQ_BGL = False
3837 53c776b5 Iustin Pop
3838 53c776b5 Iustin Pop
  def ExpandNames(self):
3839 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
3840 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
3841 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3842 53c776b5 Iustin Pop
3843 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
3844 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
3845 53c776b5 Iustin Pop
      self._LockInstancesNodes()
3846 53c776b5 Iustin Pop
3847 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
3848 53c776b5 Iustin Pop
    """Build hooks env.
3849 53c776b5 Iustin Pop

3850 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3851 53c776b5 Iustin Pop

3852 53c776b5 Iustin Pop
    """
3853 53c776b5 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3854 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
3855 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
3856 53c776b5 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3857 53c776b5 Iustin Pop
    return env, nl, nl
3858 53c776b5 Iustin Pop
3859 53c776b5 Iustin Pop
  def CheckPrereq(self):
3860 53c776b5 Iustin Pop
    """Check prerequisites.
3861 53c776b5 Iustin Pop

3862 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
3863 53c776b5 Iustin Pop

3864 53c776b5 Iustin Pop
    """
3865 53c776b5 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3866 53c776b5 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3867 53c776b5 Iustin Pop
    if instance is None:
3868 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3869 53c776b5 Iustin Pop
                                 self.op.instance_name)
3870 53c776b5 Iustin Pop
3871 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
3872 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3873 53c776b5 Iustin Pop
                                 " drbd8, cannot migrate.")
3874 53c776b5 Iustin Pop
3875 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
3876 53c776b5 Iustin Pop
    if not secondary_nodes:
3877 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
3878 733a2b6a Iustin Pop
                                      " drbd8 disk template")
3879 53c776b5 Iustin Pop
3880 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
3881 53c776b5 Iustin Pop
3882 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
3883 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
3884 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
3885 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
3886 53c776b5 Iustin Pop
                         instance.hypervisor)
3887 53c776b5 Iustin Pop
3888 53c776b5 Iustin Pop
    # check bridge existence
3889 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
3890 53c776b5 Iustin Pop
3891 53c776b5 Iustin Pop
    if not self.op.cleanup:
3892 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
3893 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
3894 53c776b5 Iustin Pop
                                                 instance)
3895 4c4e4e1e Iustin Pop
      result.Raise("Can't migrate, please use failover", prereq=True)
3896 53c776b5 Iustin Pop
3897 53c776b5 Iustin Pop
    self.instance = instance
3898 53c776b5 Iustin Pop
3899 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
3900 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
3901 53c776b5 Iustin Pop

3902 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
3903 53c776b5 Iustin Pop

3904 53c776b5 Iustin Pop
    """
3905 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
3906 53c776b5 Iustin Pop
    all_done = False
3907 53c776b5 Iustin Pop
    while not all_done:
3908 53c776b5 Iustin Pop
      all_done = True
3909 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
3910 53c776b5 Iustin Pop
                                            self.nodes_ip,
3911 53c776b5 Iustin Pop
                                            self.instance.disks)
3912 53c776b5 Iustin Pop
      min_percent = 100
3913 53c776b5 Iustin Pop
      for node, nres in result.items():
3914 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
3915 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
3916 53c776b5 Iustin Pop
        all_done = all_done and node_done
3917 53c776b5 Iustin Pop
        if node_percent is not None:
3918 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
3919 53c776b5 Iustin Pop
      if not all_done:
3920 53c776b5 Iustin Pop
        if min_percent < 100:
3921 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
3922 53c776b5 Iustin Pop
        time.sleep(2)
3923 53c776b5 Iustin Pop
3924 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
3925 53c776b5 Iustin Pop
    """Demote a node to secondary.
3926 53c776b5 Iustin Pop

3927 53c776b5 Iustin Pop
    """
3928 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
3929 53c776b5 Iustin Pop
3930 53c776b5 Iustin Pop
    for dev in self.instance.disks:
3931 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
3932 53c776b5 Iustin Pop
3933 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
3934 53c776b5 Iustin Pop
                                          self.instance.disks)
3935 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
3936 53c776b5 Iustin Pop
3937 53c776b5 Iustin Pop
  def _GoStandalone(self):
3938 53c776b5 Iustin Pop
    """Disconnect from the network.
3939 53c776b5 Iustin Pop

3940 53c776b5 Iustin Pop
    """
3941 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
3942 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
3943 53c776b5 Iustin Pop
                                               self.instance.disks)
3944 53c776b5 Iustin Pop
    for node, nres in result.items():
3945 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
3946 53c776b5 Iustin Pop
3947 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
3948 53c776b5 Iustin Pop
    """Reconnect to the network.
3949 53c776b5 Iustin Pop

3950 53c776b5 Iustin Pop
    """
3951 53c776b5 Iustin Pop
    if multimaster:
3952 53c776b5 Iustin Pop
      msg = "dual-master"
3953 53c776b5 Iustin Pop
    else:
3954 53c776b5 Iustin Pop
      msg = "single-master"
3955 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
3956 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
3957 53c776b5 Iustin Pop
                                           self.instance.disks,
3958 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
3959 53c776b5 Iustin Pop
    for node, nres in result.items():
3960 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
3961 53c776b5 Iustin Pop
3962 53c776b5 Iustin Pop
  def _ExecCleanup(self):
3963 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
3964 53c776b5 Iustin Pop

3965 53c776b5 Iustin Pop
    The cleanup is done by:
3966 53c776b5 Iustin Pop
      - check that the instance is running only on one node
3967 53c776b5 Iustin Pop
        (and update the config if needed)
3968 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
3969 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
3970 53c776b5 Iustin Pop
      - disconnect from the network
3971 53c776b5 Iustin Pop
      - change disks into single-master mode
3972 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
3973 53c776b5 Iustin Pop

3974 53c776b5 Iustin Pop
    """
3975 53c776b5 Iustin Pop
    instance = self.instance
3976 53c776b5 Iustin Pop
    target_node = self.target_node
3977 53c776b5 Iustin Pop
    source_node = self.source_node
3978 53c776b5 Iustin Pop
3979 53c776b5 Iustin Pop
    # check running on only one node
3980 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
3981 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
3982 53c776b5 Iustin Pop
                     " a bad state)")
3983 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
3984 53c776b5 Iustin Pop
    for node, result in ins_l.items():
3985 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
3986 53c776b5 Iustin Pop
3987 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
3988 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
3989 53c776b5 Iustin Pop
3990 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
3991 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
3992 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
3993 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
3994 53c776b5 Iustin Pop
                               " and restart this operation.")
3995 53c776b5 Iustin Pop
3996 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
3997 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
3998 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
3999 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
4000 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
4001 53c776b5 Iustin Pop
4002 53c776b5 Iustin Pop
    if runningon_target:
4003 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
4004 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
4005 53c776b5 Iustin Pop
                       " updating config" % target_node)
4006 53c776b5 Iustin Pop
      instance.primary_node = target_node
4007 53c776b5 Iustin Pop
      self.cfg.Update(instance)
4008 53c776b5 Iustin Pop
      demoted_node = source_node
4009 53c776b5 Iustin Pop
    else:
4010 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
4011 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
4012 53c776b5 Iustin Pop
      demoted_node = target_node
4013 53c776b5 Iustin Pop
4014 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
4015 53c776b5 Iustin Pop
    try:
4016 53c776b5 Iustin Pop
      self._WaitUntilSync()
4017 53c776b5 Iustin Pop
    except errors.OpExecError:
4018 53c776b5 Iustin Pop
      # we ignore errors here, since if the device is standalone, it
4019 53c776b5 Iustin Pop
      # won't be able to sync
4020 53c776b5 Iustin Pop
      pass
4021 53c776b5 Iustin Pop
    self._GoStandalone()
4022 53c776b5 Iustin Pop
    self._GoReconnect(False)
4023 53c776b5 Iustin Pop
    self._WaitUntilSync()
4024 53c776b5 Iustin Pop
4025 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4026 53c776b5 Iustin Pop
4027 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
4028 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
4029 6906a9d8 Guido Trotter

4030 6906a9d8 Guido Trotter
    """
4031 6906a9d8 Guido Trotter
    target_node = self.target_node
4032 6906a9d8 Guido Trotter
    try:
4033 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
4034 6906a9d8 Guido Trotter
      self._GoStandalone()
4035 6906a9d8 Guido Trotter
      self._GoReconnect(False)
4036 6906a9d8 Guido Trotter
      self._WaitUntilSync()
4037 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
4038 6906a9d8 Guido Trotter
      self.LogWarning("Migration failed and I can't reconnect the"
4039 6906a9d8 Guido Trotter
                      " drives: error '%s'\n"
4040 6906a9d8 Guido Trotter
                      "Please look and recover the instance status" %
4041 6906a9d8 Guido Trotter
                      str(err))
4042 6906a9d8 Guido Trotter
4043 6906a9d8 Guido Trotter
  def _AbortMigration(self):
4044 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
4045 6906a9d8 Guido Trotter

4046 6906a9d8 Guido Trotter
    """
4047 6906a9d8 Guido Trotter
    instance = self.instance
4048 6906a9d8 Guido Trotter
    target_node = self.target_node
4049 6906a9d8 Guido Trotter
    migration_info = self.migration_info
4050 6906a9d8 Guido Trotter
4051 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
4052 6906a9d8 Guido Trotter
                                                    instance,
4053 6906a9d8 Guido Trotter
                                                    migration_info,
4054 6906a9d8 Guido Trotter
                                                    False)
4055 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
4056 6906a9d8 Guido Trotter
    if abort_msg:
4057 6906a9d8 Guido Trotter
      logging.error("Aborting migration failed on target node %s: %s" %
4058 6906a9d8 Guido Trotter
                    (target_node, abort_msg))
4059 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we still have to try to revert the
4060 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
4061 6906a9d8 Guido Trotter
4062 53c776b5 Iustin Pop
  def _ExecMigration(self):
4063 53c776b5 Iustin Pop
    """Migrate an instance.
4064 53c776b5 Iustin Pop

4065 53c776b5 Iustin Pop
    The migrate is done by:
4066 53c776b5 Iustin Pop
      - change the disks into dual-master mode
4067 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
4068 53c776b5 Iustin Pop
      - migrate the instance
4069 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
4070 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4071 53c776b5 Iustin Pop
      - change disks into single-master mode
4072 53c776b5 Iustin Pop

4073 53c776b5 Iustin Pop
    """
4074 53c776b5 Iustin Pop
    instance = self.instance
4075 53c776b5 Iustin Pop
    target_node = self.target_node
4076 53c776b5 Iustin Pop
    source_node = self.source_node
4077 53c776b5 Iustin Pop
4078 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
4079 53c776b5 Iustin Pop
    for dev in instance.disks:
4080 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
4081 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
4082 53c776b5 Iustin Pop
                                 " synchronized on target node,"
4083 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
4084 53c776b5 Iustin Pop
4085 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
4086 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
4087 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4088 6906a9d8 Guido Trotter
    if msg:
4089 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
4090 0959c824 Iustin Pop
                 (source_node, msg))
4091 6906a9d8 Guido Trotter
      logging.error(log_err)
4092 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
4093 6906a9d8 Guido Trotter
4094 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
4095 6906a9d8 Guido Trotter
4096 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
4097 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
4098 53c776b5 Iustin Pop
    self._GoStandalone()
4099 53c776b5 Iustin Pop
    self._GoReconnect(True)
4100 53c776b5 Iustin Pop
    self._WaitUntilSync()
4101 53c776b5 Iustin Pop
4102 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
4103 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
4104 6906a9d8 Guido Trotter
                                           instance,
4105 6906a9d8 Guido Trotter
                                           migration_info,
4106 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
4107 6906a9d8 Guido Trotter
4108 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4109 6906a9d8 Guido Trotter
    if msg:
4110 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
4111 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
4112 6906a9d8 Guido Trotter
      self._AbortMigration()
4113 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
4114 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
4115 6906a9d8 Guido Trotter
                               (instance.name, msg))
4116 6906a9d8 Guido Trotter
4117 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
4118 53c776b5 Iustin Pop
    time.sleep(10)
4119 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
4120 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
4121 53c776b5 Iustin Pop
                                            self.op.live)
4122 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4123 53c776b5 Iustin Pop
    if msg:
4124 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
4125 53c776b5 Iustin Pop
                    " disk status: %s", msg)
4126 6906a9d8 Guido Trotter
      self._AbortMigration()
4127 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
4128 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
4129 53c776b5 Iustin Pop
                               (instance.name, msg))
4130 53c776b5 Iustin Pop
    time.sleep(10)
4131 53c776b5 Iustin Pop
4132 53c776b5 Iustin Pop
    instance.primary_node = target_node
4133 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
4134 53c776b5 Iustin Pop
    self.cfg.Update(instance)
4135 53c776b5 Iustin Pop
4136 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
4137 6906a9d8 Guido Trotter
                                              instance,
4138 6906a9d8 Guido Trotter
                                              migration_info,
4139 6906a9d8 Guido Trotter
                                              True)
4140 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4141 6906a9d8 Guido Trotter
    if msg:
4142 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
4143 6906a9d8 Guido Trotter
                    " %s" % msg)
4144 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
4145 6906a9d8 Guido Trotter
                               msg)
4146 6906a9d8 Guido Trotter
4147 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
4148 53c776b5 Iustin Pop
    self._WaitUntilSync()
4149 53c776b5 Iustin Pop
    self._GoStandalone()
4150 53c776b5 Iustin Pop
    self._GoReconnect(False)
4151 53c776b5 Iustin Pop
    self._WaitUntilSync()
4152 53c776b5 Iustin Pop
4153 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4154 53c776b5 Iustin Pop
4155 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
4156 53c776b5 Iustin Pop
    """Perform the migration.
4157 53c776b5 Iustin Pop

4158 53c776b5 Iustin Pop
    """
4159 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
4160 53c776b5 Iustin Pop
4161 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
4162 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
4163 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
4164 53c776b5 Iustin Pop
    self.nodes_ip = {
4165 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
4166 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
4167 53c776b5 Iustin Pop
      }
4168 53c776b5 Iustin Pop
    if self.op.cleanup:
4169 53c776b5 Iustin Pop
      return self._ExecCleanup()
4170 53c776b5 Iustin Pop
    else:
4171 53c776b5 Iustin Pop
      return self._ExecMigration()
4172 53c776b5 Iustin Pop
4173 53c776b5 Iustin Pop
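# Example (a sketch): the failover and migration LUs above are normally
# reached through opcodes rather than instantiated directly.  Assuming the
# standard opcodes module exposes OpMigrateInstance with the fields listed
# in _OP_REQP above, a submission via a luxi client would look roughly like:
#
#   op = opcodes.OpMigrateInstance(instance_name="inst1.example.com",
#                                  live=True, cleanup=False)
#   job_id = cl.SubmitJob([op])  # cl: a luxi.Client(), illustrative only
#
# which corresponds to "gnt-instance migrate" on the command line (and
# "gnt-instance failover" for LUFailoverInstance).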
4174 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
4175 428958aa Iustin Pop
                    info, force_open):
4176 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
4177 a8083063 Iustin Pop

4178 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
4179 a8083063 Iustin Pop
  all its children.
4180 a8083063 Iustin Pop

4181 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
4182 a8083063 Iustin Pop

4183 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
4184 428958aa Iustin Pop
  @param node: the node on which to create the device
4185 428958aa Iustin Pop
  @type instance: L{objects.Instance}
4186 428958aa Iustin Pop
  @param instance: the instance which owns the device
4187 428958aa Iustin Pop
  @type device: L{objects.Disk}
4188 428958aa Iustin Pop
  @param device: the device to create
4189 428958aa Iustin Pop
  @type force_create: boolean
4190 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
4191 428958aa Iustin Pop
      will be change to True whenever we find a device which has
4192 428958aa Iustin Pop
      CreateOnSecondary() attribute
4193 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4194 428958aa Iustin Pop
      (this will be represented as a LVM tag)
4195 428958aa Iustin Pop
  @type force_open: boolean
4196 428958aa Iustin Pop
  @param force_open: this parameter will be passed to the
4197 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4198 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
4199 428958aa Iustin Pop
      the child assembly and the device's own Open() execution
4200 428958aa Iustin Pop

4201 a8083063 Iustin Pop
  """
4202 a8083063 Iustin Pop
  if device.CreateOnSecondary():
4203 428958aa Iustin Pop
    force_create = True
4204 796cab27 Iustin Pop
4205 a8083063 Iustin Pop
  if device.children:
4206 a8083063 Iustin Pop
    for child in device.children:
4207 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
4208 428958aa Iustin Pop
                      info, force_open)
4209 a8083063 Iustin Pop
4210 428958aa Iustin Pop
  if not force_create:
4211 796cab27 Iustin Pop
    return
4212 796cab27 Iustin Pop
4213 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
4214 de12473a Iustin Pop
4215 de12473a Iustin Pop
4216 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
4217 de12473a Iustin Pop
  """Create a single block device on a given node.
4218 de12473a Iustin Pop

4219 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
4220 de12473a Iustin Pop
  created in advance.
4221 de12473a Iustin Pop

4222 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
4223 de12473a Iustin Pop
  @param node: the node on which to create the device
4224 de12473a Iustin Pop
  @type instance: L{objects.Instance}
4225 de12473a Iustin Pop
  @param instance: the instance which owns the device
4226 de12473a Iustin Pop
  @type device: L{objects.Disk}
4227 de12473a Iustin Pop
  @param device: the device to create
4228 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4229 de12473a Iustin Pop
      (this will be represented as a LVM tag)
4230 de12473a Iustin Pop
  @type force_open: boolean
4231 de12473a Iustin Pop
  @param force_open: this parameter will be passes to the
4232 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4233 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
4234 de12473a Iustin Pop
      the child assembly and the device own Open() execution
4235 de12473a Iustin Pop

4236 de12473a Iustin Pop
  """
4237 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
4238 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
4239 428958aa Iustin Pop
                                       instance.name, force_open, info)
4240 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
4241 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
4242 a8083063 Iustin Pop
  if device.physical_id is None:
4243 0959c824 Iustin Pop
    device.physical_id = result.payload
4244 a8083063 Iustin Pop
4245 a8083063 Iustin Pop
4246 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
4247 923b1523 Iustin Pop
  """Generate a suitable LV name.
4248 923b1523 Iustin Pop

4249 923b1523 Iustin Pop
  This will generate one unique logical volume name per requested extension.
4250 923b1523 Iustin Pop

4251 923b1523 Iustin Pop
  """
4252 923b1523 Iustin Pop
  results = []
4253 923b1523 Iustin Pop
  for val in exts:
4254 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
4255 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
4256 923b1523 Iustin Pop
  return results
4257 923b1523 Iustin Pop
4258 923b1523 Iustin Pop
4259 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
4260 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
4261 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
4262 a1f445d3 Iustin Pop

4263 a1f445d3 Iustin Pop
  """
4264 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
4265 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
4266 b9bddb6b Iustin Pop
  shared_secret = lu.cfg.GenerateDRBDSecret()
4267 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4268 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
4269 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4270 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
4271 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
4272 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
4273 f9518d38 Iustin Pop
                                      p_minor, s_minor,
4274 f9518d38 Iustin Pop
                                      shared_secret),
4275 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
4276 a1f445d3 Iustin Pop
                          iv_name=iv_name)
4277 a1f445d3 Iustin Pop
  return drbd_dev
4278 a1f445d3 Iustin Pop
4279 7c0d6283 Michael Hanselmann
4280 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
4281 a8083063 Iustin Pop
                          instance_name, primary_node,
4282 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
4283 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
4284 e2a65344 Iustin Pop
                          base_index):
4285 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
4286 a8083063 Iustin Pop

4287 a8083063 Iustin Pop
  """
4288 a8083063 Iustin Pop
  #TODO: compute space requirements
4289 a8083063 Iustin Pop
4290 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
4291 08db7c5c Iustin Pop
  disk_count = len(disk_info)
4292 08db7c5c Iustin Pop
  disks = []
4293 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
4294 08db7c5c Iustin Pop
    pass
4295 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
4296 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
4297 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
4298 923b1523 Iustin Pop
4299 fb4b324b Guido Trotter
    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
4300 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
4301 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
4302 e2a65344 Iustin Pop
      disk_index = idx + base_index
4303 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
4304 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
4305 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
4306 6ec66eae Iustin Pop
                              mode=disk["mode"])
4307 08db7c5c Iustin Pop
      disks.append(disk_dev)
4308 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
4309 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
4310 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
4311 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
4312 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
4313 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
4314 08db7c5c Iustin Pop
4315 e6c1ff2f Iustin Pop
    names = []
4316 fb4b324b Guido Trotter
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
4317 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
4318 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
4319 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
4320 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
4321 112050d9 Iustin Pop
      disk_index = idx + base_index
4322 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
4323 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
4324 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
4325 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
4326 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
4327 08db7c5c Iustin Pop
      disks.append(disk_dev)
4328 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
4329 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
4330 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
4331 0f1a06e3 Manuel Franceschini
4332 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
4333 112050d9 Iustin Pop
      disk_index = idx + base_index
4334 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
4335 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
4336 08db7c5c Iustin Pop
                              logical_id=(file_driver,
4337 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
4338 43e99cff Guido Trotter
                                                         disk_index)),
4339 6ec66eae Iustin Pop
                              mode=disk["mode"])
4340 08db7c5c Iustin Pop
      disks.append(disk_dev)
4341 a8083063 Iustin Pop
  else:
4342 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
4343 a8083063 Iustin Pop
  return disks
4344 a8083063 Iustin Pop
4345 a8083063 Iustin Pop
4346 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4347 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4348 3ecf6786 Iustin Pop

4349 3ecf6786 Iustin Pop
  """
4350 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
4351 a0c3fea1 Michael Hanselmann
4352 a0c3fea1 Michael Hanselmann
4353 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
4354 a8083063 Iustin Pop
  """Create all disks for an instance.
4355 a8083063 Iustin Pop

4356 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
4357 a8083063 Iustin Pop

4358 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
4359 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
4360 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
4361 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
4362 e4376078 Iustin Pop
  @raise errors.OpExecError: if any of the devices cannot be created
4364 a8083063 Iustin Pop

4365 a8083063 Iustin Pop
  """
4366 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
4367 428958aa Iustin Pop
  pnode = instance.primary_node
4368 a0c3fea1 Michael Hanselmann
4369 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
4370 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4371 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
4372 0f1a06e3 Manuel Franceschini
4373 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
4374 4c4e4e1e Iustin Pop
                 " node %s: %s" % (file_storage_dir, pnode))
4375 0f1a06e3 Manuel Franceschini
4376 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
4377 24991749 Iustin Pop
  # LUSetInstanceParams
4378 a8083063 Iustin Pop
  for device in instance.disks:
4379 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
4380 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
4381 a8083063 Iustin Pop
    #HARDCODE
4382 428958aa Iustin Pop
    for node in instance.all_nodes:
4383 428958aa Iustin Pop
      f_create = node == pnode
4384 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
4385 a8083063 Iustin Pop
4386 a8083063 Iustin Pop
4387 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
4388 a8083063 Iustin Pop
  """Remove all disks for an instance.
4389 a8083063 Iustin Pop

4390 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
4391 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
4392 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
4393 a8083063 Iustin Pop
  with `_CreateDisks()`).
4394 a8083063 Iustin Pop

4395 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
4396 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
4397 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
4398 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
4399 e4376078 Iustin Pop
  @rtype: boolean
4400 e4376078 Iustin Pop
  @return: the success of the removal
4401 a8083063 Iustin Pop

4402 a8083063 Iustin Pop
  """
4403 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
4404 a8083063 Iustin Pop
4405 e1bc0878 Iustin Pop
  all_result = True
4406 a8083063 Iustin Pop
  for device in instance.disks:
4407 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
4408 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
4409 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
4410 e1bc0878 Iustin Pop
      if msg:
4411 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
4412 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
4413 e1bc0878 Iustin Pop
        all_result = False
4414 0f1a06e3 Manuel Franceschini
4415 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
4416 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4417 781de953 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
4418 781de953 Iustin Pop
                                                 file_storage_dir)
4419 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4420 b2b8bcce Iustin Pop
    if msg:
4421 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
4422 b2b8bcce Iustin Pop
                    file_storage_dir, instance.primary_node, msg)
4423 e1bc0878 Iustin Pop
      all_result = False
4424 0f1a06e3 Manuel Franceschini
4425 e1bc0878 Iustin Pop
  return all_result
4426 a8083063 Iustin Pop
4427 a8083063 Iustin Pop
4428 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
4429 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
4430 e2fe6369 Iustin Pop

4431 e2fe6369 Iustin Pop
  """
4432 e2fe6369 Iustin Pop
  # Required free disk space as a function of the disk template and sizes
4433 e2fe6369 Iustin Pop
  req_size_dict = {
4434 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
4435 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
4436 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
4437 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
4438 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
4439 e2fe6369 Iustin Pop
  }
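  # Worked example: a DRBD8 instance with two disks of 1024 MB and 2048 MB
  # needs (1024 + 128) + (2048 + 128) = 3328 MB free in the volume group,
  # since each disk gets an extra 128 MB of DRBD metadata.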
4440 e2fe6369 Iustin Pop
4441 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
4442 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
4443 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
4444 e2fe6369 Iustin Pop
4445 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
4446 e2fe6369 Iustin Pop
4447 e2fe6369 Iustin Pop
4448 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
4449 74409b12 Iustin Pop
  """Hypervisor parameter validation.
4450 74409b12 Iustin Pop

4451 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
4452 74409b12 Iustin Pop
  used in both instance create and instance modify.
4453 74409b12 Iustin Pop

4454 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
4455 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
4456 74409b12 Iustin Pop
  @type nodenames: list
4457 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
4458 74409b12 Iustin Pop
  @type hvname: string
4459 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
4460 74409b12 Iustin Pop
  @type hvparams: dict
4461 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
4462 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
4463 74409b12 Iustin Pop

4464 74409b12 Iustin Pop
  """
4465 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4466 74409b12 Iustin Pop
                                                  hvname,
4467 74409b12 Iustin Pop
                                                  hvparams)
4468 74409b12 Iustin Pop
  for node in nodenames:
4469 781de953 Iustin Pop
    info = hvinfo[node]
4470 68c6f21c Iustin Pop
    if info.offline:
4471 68c6f21c Iustin Pop
      continue
4472 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
4473 74409b12 Iustin Pop
4474 74409b12 Iustin Pop
4475 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
4476 a8083063 Iustin Pop
  """Create an instance.
4477 a8083063 Iustin Pop

4478 a8083063 Iustin Pop
  """
4479 a8083063 Iustin Pop
  HPATH = "instance-add"
4480 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4481 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
4482 08db7c5c Iustin Pop
              "mode", "start",
4483 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
4484 338e51e8 Iustin Pop
              "hvparams", "beparams"]
4485 7baf741d Guido Trotter
  REQ_BGL = False
4486 7baf741d Guido Trotter
4487 7baf741d Guido Trotter
  def _ExpandNode(self, node):
4488 7baf741d Guido Trotter
    """Expands and checks one node name.
4489 7baf741d Guido Trotter

4490 7baf741d Guido Trotter
    """
4491 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
4492 7baf741d Guido Trotter
    if node_full is None:
4493 7baf741d Guido Trotter
      raise errors.OpPrereqError("Unknown node %s" % node)
4494 7baf741d Guido Trotter
    return node_full
4495 7baf741d Guido Trotter
4496 7baf741d Guido Trotter
  def ExpandNames(self):
4497 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
4498 7baf741d Guido Trotter

4499 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
4500 7baf741d Guido Trotter

4501 7baf741d Guido Trotter
    """
4502 7baf741d Guido Trotter
    self.needed_locks = {}
4503 7baf741d Guido Trotter
4504 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
4505 6785674e Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
4506 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
4507 7baf741d Guido Trotter
        setattr(self.op, attr, None)
4508 7baf741d Guido Trotter
4509 4b2f38dd Iustin Pop
    # cheap checks, mostly valid constants given
4510 4b2f38dd Iustin Pop
4511 7baf741d Guido Trotter
    # verify creation mode
4512 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
4513 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
4514 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
4515 7baf741d Guido Trotter
                                 self.op.mode)
4516 4b2f38dd Iustin Pop
4517 7baf741d Guido Trotter
    # disk template and mirror node verification
4518 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
4519 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid disk template name")
4520 7baf741d Guido Trotter
4521 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
4522 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
4523 4b2f38dd Iustin Pop
4524 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
4525 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
4526 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
4527 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
4528 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
4529 4b2f38dd Iustin Pop
                                  ",".join(enabled_hvs)))
4530 4b2f38dd Iustin Pop
4531 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
4532 a5728081 Guido Trotter
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4533 abe609b2 Guido Trotter
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
4534 8705eb96 Iustin Pop
                                  self.op.hvparams)
4535 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
4536 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
4537 67fc3042 Iustin Pop
    self.hv_full = filled_hvp
4538 6785674e Iustin Pop
4539 338e51e8 Iustin Pop
    # fill and remember the beparams dict
4540 a5728081 Guido Trotter
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4541 4ef7f423 Guido Trotter
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
4542 338e51e8 Iustin Pop
                                    self.op.beparams)
4543 338e51e8 Iustin Pop
4544 7baf741d Guido Trotter
    #### instance parameters check
4545 7baf741d Guido Trotter
4546 7baf741d Guido Trotter
    # instance name verification
4547 7baf741d Guido Trotter
    hostname1 = utils.HostInfo(self.op.instance_name)
4548 7baf741d Guido Trotter
    self.op.instance_name = instance_name = hostname1.name
4549 7baf741d Guido Trotter
4550 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
4551 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
4552 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
4553 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4554 7baf741d Guido Trotter
                                 instance_name)
4555 7baf741d Guido Trotter
4556 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
4557 7baf741d Guido Trotter
4558 08db7c5c Iustin Pop
    # NIC buildup
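    # each entry in self.op.nics is a dict with optional "mode", "ip", "mac",
    # "bridge" and "link" keys; anything not given falls back to the
    # cluster-level nicparams defaults handled below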
4559 08db7c5c Iustin Pop
    self.nics = []
4560 9dce4771 Guido Trotter
    for idx, nic in enumerate(self.op.nics):
4561 9dce4771 Guido Trotter
      nic_mode_req = nic.get("mode", None)
4562 9dce4771 Guido Trotter
      nic_mode = nic_mode_req
4563 9dce4771 Guido Trotter
      if nic_mode is None:
4564 9dce4771 Guido Trotter
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
4565 9dce4771 Guido Trotter
4566 9dce4771 Guido Trotter
      # in routed mode, for the first nic, the default ip is 'auto'
4567 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
4568 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_AUTO
4569 9dce4771 Guido Trotter
      else:
4570 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_NONE
4571 9dce4771 Guido Trotter
4572 08db7c5c Iustin Pop
      # ip validity checks
4573 9dce4771 Guido Trotter
      ip = nic.get("ip", default_ip_mode)
4574 9dce4771 Guido Trotter
      if ip is None or ip.lower() == constants.VALUE_NONE:
4575 08db7c5c Iustin Pop
        nic_ip = None
4576 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
4577 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
4578 08db7c5c Iustin Pop
      else:
4579 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
4580 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
4581 08db7c5c Iustin Pop
                                     " like a valid IP" % ip)
4582 08db7c5c Iustin Pop
        nic_ip = ip
4583 08db7c5c Iustin Pop
4584 9dce4771 Guido Trotter
      # TODO: check the ip for uniqueness !!
4585 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
4586 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Routed nic mode requires an ip address")
4587 9dce4771 Guido Trotter
4588 08db7c5c Iustin Pop
      # MAC address verification
4589 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
4590 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4591 08db7c5c Iustin Pop
        if not utils.IsValidMac(mac.lower()):
4592 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
4593 08db7c5c Iustin Pop
                                     mac)
4594 08db7c5c Iustin Pop
      # bridge verification
4595 9939547b Iustin Pop
      bridge = nic.get("bridge", None)
4596 9dce4771 Guido Trotter
      link = nic.get("link", None)
4597 9dce4771 Guido Trotter
      if bridge and link:
4598 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
4599 29921401 Iustin Pop
                                   " at the same time")
4600 9dce4771 Guido Trotter
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
4601 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
4602 9dce4771 Guido Trotter
      elif bridge:
4603 9dce4771 Guido Trotter
        link = bridge
4604 9dce4771 Guido Trotter
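      # whichever of "bridge"/"link" was given, only "link" is carried
      # forward from here on, via the nic's nicparams dict built below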
4605 9dce4771 Guido Trotter
      nicparams = {}
4606 9dce4771 Guido Trotter
      if nic_mode_req:
4607 9dce4771 Guido Trotter
        nicparams[constants.NIC_MODE] = nic_mode_req
4608 9dce4771 Guido Trotter
      if link:
4609 9dce4771 Guido Trotter
        nicparams[constants.NIC_LINK] = link
4610 9dce4771 Guido Trotter
4611 9dce4771 Guido Trotter
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4612 9dce4771 Guido Trotter
                                      nicparams)
4613 9dce4771 Guido Trotter
      objects.NIC.CheckParameterSyntax(check_params)
4614 9dce4771 Guido Trotter
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
4615 08db7c5c Iustin Pop
4616 08db7c5c Iustin Pop
    # disk checks/pre-build
4617 08db7c5c Iustin Pop
    self.disks = []
4618 08db7c5c Iustin Pop
    for disk in self.op.disks:
4619 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
4620 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
4621 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
4622 08db7c5c Iustin Pop
                                   mode)
4623 08db7c5c Iustin Pop
      size = disk.get("size", None)
4624 08db7c5c Iustin Pop
      if size is None:
4625 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Missing disk size")
4626 08db7c5c Iustin Pop
      try:
4627 08db7c5c Iustin Pop
        size = int(size)
4628 08db7c5c Iustin Pop
      except ValueError:
4629 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
4630 08db7c5c Iustin Pop
      self.disks.append({"size": size, "mode": mode})
4631 08db7c5c Iustin Pop
4632 7baf741d Guido Trotter
    # used in CheckPrereq for ip ping check
4633 7baf741d Guido Trotter
    self.check_ip = hostname1.ip
4634 7baf741d Guido Trotter
4635 7baf741d Guido Trotter
    # file storage checks
4636 7baf741d Guido Trotter
    if (self.op.file_driver and
4637 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
4638 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
4639 7baf741d Guido Trotter
                                 self.op.file_driver)
4640 7baf741d Guido Trotter
4641 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
4642 7baf741d Guido Trotter
      raise errors.OpPrereqError("File storage directory path not absolute")
4643 7baf741d Guido Trotter
4644 7baf741d Guido Trotter
    ### Node/iallocator related checks
4645 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
4646 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
4647 7baf741d Guido Trotter
                                 " node must be given")
4648 7baf741d Guido Trotter
4649 7baf741d Guido Trotter
    if self.op.iallocator:
4650 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4651 7baf741d Guido Trotter
    else:
4652 7baf741d Guido Trotter
      self.op.pnode = self._ExpandNode(self.op.pnode)
4653 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
4654 7baf741d Guido Trotter
      if self.op.snode is not None:
4655 7baf741d Guido Trotter
        self.op.snode = self._ExpandNode(self.op.snode)
4656 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
4657 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
4658 7baf741d Guido Trotter
4659 7baf741d Guido Trotter
    # in case of import lock the source node too
4660 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
4661 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
4662 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
4663 7baf741d Guido Trotter
4664 b9322a9f Guido Trotter
      if src_path is None:
4665 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
4666 b9322a9f Guido Trotter
4667 b9322a9f Guido Trotter
      if src_node is None:
4668 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4669 b9322a9f Guido Trotter
        self.op.src_node = None
4670 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
4671 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
4672 b9322a9f Guido Trotter
                                     " path requires a source node option.")
4673 b9322a9f Guido Trotter
      else:
4674 b9322a9f Guido Trotter
        self.op.src_node = src_node = self._ExpandNode(src_node)
4675 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4676 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
4677 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
4678 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
4679 b9322a9f Guido Trotter
            os.path.join(constants.EXPORT_DIR, src_path)
4680 7baf741d Guido Trotter
4681 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
4682 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
4683 7baf741d Guido Trotter
        raise errors.OpPrereqError("No guest OS specified")
4684 a8083063 Iustin Pop
4685 538475ca Iustin Pop
  def _RunAllocator(self):
4686 538475ca Iustin Pop
    """Run the allocator based on input opcode.
4687 538475ca Iustin Pop

4688 538475ca Iustin Pop
    """
4689 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
4690 923ddac0 Michael Hanselmann
    ial = IAllocator(self.cfg, self.rpc,
4691 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
4692 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
4693 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
4694 d1c2dd75 Iustin Pop
                     tags=[],
4695 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
4696 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
4697 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
4698 08db7c5c Iustin Pop
                     disks=self.disks,
4699 d1c2dd75 Iustin Pop
                     nics=nics,
4700 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
4701 29859cb7 Iustin Pop
                     )
4702 d1c2dd75 Iustin Pop
4703 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
4704 d1c2dd75 Iustin Pop
4705 d1c2dd75 Iustin Pop
    if not ial.success:
4706 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
4707 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
4708 d1c2dd75 Iustin Pop
                                                           ial.info))
4709 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
4710 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4711 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
4712 97abc79f Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
4713 1ce4bbe3 René Nussbaumer
                                  ial.required_nodes))
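    # ial.nodes is the allocator's answer: the first entry becomes the
    # primary node and, when two nodes are required (e.g. for mirrored disk
    # templates), the second becomes the secondary.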
4714 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
4715 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
4716 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
4717 86d9d3bb Iustin Pop
                 ", ".join(ial.nodes))
4718 27579978 Iustin Pop
    if ial.required_nodes == 2:
4719 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
4720 538475ca Iustin Pop
4721 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4722 a8083063 Iustin Pop
    """Build hooks env.
4723 a8083063 Iustin Pop

4724 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4725 a8083063 Iustin Pop

4726 a8083063 Iustin Pop
    """
4727 a8083063 Iustin Pop
    env = {
4728 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
4729 a8083063 Iustin Pop
      }
4730 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
4731 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
4732 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
4733 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
4734 396e1b78 Michael Hanselmann
4735 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
4736 2c2690c9 Iustin Pop
      name=self.op.instance_name,
4737 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
4738 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
4739 4978db17 Iustin Pop
      status=self.op.start,
4740 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
4741 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
4742 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
4743 f9b10246 Guido Trotter
      nics=_NICListToTuple(self, self.nics),
4744 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
4745 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
4746 67fc3042 Iustin Pop
      bep=self.be_full,
4747 67fc3042 Iustin Pop
      hvp=self.hv_full,
4748 3df6e710 Iustin Pop
      hypervisor_name=self.op.hypervisor,
4749 396e1b78 Michael Hanselmann
    ))
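    # Note: everything collected in env above ends up in the environment of
    # the instance-add hooks, which run on the nodes listed in nl below.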
4750 a8083063 Iustin Pop
4751 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
4752 a8083063 Iustin Pop
          self.secondaries)
4753 a8083063 Iustin Pop
    return env, nl, nl
4754 a8083063 Iustin Pop
4755 a8083063 Iustin Pop
4756 a8083063 Iustin Pop
  def CheckPrereq(self):
4757 a8083063 Iustin Pop
    """Check prerequisites.
4758 a8083063 Iustin Pop

4759 a8083063 Iustin Pop
    """
4760 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
4761 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
4762 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
4763 eedc99de Manuel Franceschini
                                 " instances")
4764 eedc99de Manuel Franceschini
4765 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
4766 7baf741d Guido Trotter
      src_node = self.op.src_node
4767 7baf741d Guido Trotter
      src_path = self.op.src_path
4768 a8083063 Iustin Pop
4769 c0cbdc67 Guido Trotter
      if src_node is None:
4770 1b7bfbb7 Iustin Pop
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
4771 1b7bfbb7 Iustin Pop
        exp_list = self.rpc.call_export_list(locked_nodes)
4772 c0cbdc67 Guido Trotter
        found = False
4773 c0cbdc67 Guido Trotter
        for node in exp_list:
4774 4c4e4e1e Iustin Pop
          if exp_list[node].fail_msg:
4775 1b7bfbb7 Iustin Pop
            continue
4776 1b7bfbb7 Iustin Pop
          if src_path in exp_list[node].payload:
4777 c0cbdc67 Guido Trotter
            found = True
4778 c0cbdc67 Guido Trotter
            self.op.src_node = src_node = node
4779 c0cbdc67 Guido Trotter
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
4780 c0cbdc67 Guido Trotter
                                                       src_path)
4781 c0cbdc67 Guido Trotter
            break
4782 c0cbdc67 Guido Trotter
        if not found:
4783 c0cbdc67 Guido Trotter
          raise errors.OpPrereqError("No export found for relative path %s" %
4784 c0cbdc67 Guido Trotter
                                      src_path)
4785 c0cbdc67 Guido Trotter
4786 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, src_node)
4787 781de953 Iustin Pop
      result = self.rpc.call_export_info(src_node, src_path)
4788 4c4e4e1e Iustin Pop
      result.Raise("No export or invalid export found in dir %s" % src_path)
4789 a8083063 Iustin Pop
4790 3eccac06 Iustin Pop
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
4791 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
4792 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
4793 a8083063 Iustin Pop
4794 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
4795 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
4796 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
4797 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
4798 a8083063 Iustin Pop
4799 09acf207 Guido Trotter
      # Check that the new instance doesn't have fewer disks than the export
4800 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
4801 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
4802 09acf207 Guido Trotter
      if instance_disks < export_disks:
4803 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
4804 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
4805 726d7d68 Iustin Pop
                                   (instance_disks, export_disks))
4806 a8083063 Iustin Pop
4807 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
4808 09acf207 Guido Trotter
      disk_images = []
4809 09acf207 Guido Trotter
      for idx in range(export_disks):
4810 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
4811 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
4812 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
4813 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
4814 09acf207 Guido Trotter
          image = os.path.join(src_path, export_name)
4815 09acf207 Guido Trotter
          disk_images.append(image)
4816 09acf207 Guido Trotter
        else:
4817 09acf207 Guido Trotter
          disk_images.append(False)
4818 09acf207 Guido Trotter
4819 09acf207 Guido Trotter
      self.src_images = disk_images
4820 901a65c1 Iustin Pop
4821 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
4822 b4364a6b Guido Trotter
      try:
        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
      except ValueError, err:
        raise errors.OpPrereqError("Invalid 'nic_count' in export: %s" % err)
4824 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
4825 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
4826 b4364a6b Guido Trotter
          # only take over MACs for NICs that actually exist in the export
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx + 1:
4827 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
4828 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
4829 bc89efc3 Guido Trotter
4830 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
4831 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
4832 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
4833 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
4834 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
4835 901a65c1 Iustin Pop
4836 901a65c1 Iustin Pop
    if self.op.ip_check:
4837 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
4838 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4839 7b3a8fb5 Iustin Pop
                                   (self.check_ip, self.op.instance_name))
4840 901a65c1 Iustin Pop
4841 295728df Guido Trotter
    #### mac address generation
4842 295728df Guido Trotter
    # By generating the mac address here, the allocator and the hooks both get
4843 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
4844 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
4845 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
4846 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
4847 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
4848 295728df Guido Trotter
    # creation job will fail.
4849 295728df Guido Trotter
    for nic in self.nics:
4850 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4851 295728df Guido Trotter
        nic.mac = self.cfg.GenerateMAC()
4852 295728df Guido Trotter
4853 538475ca Iustin Pop
    #### allocator run
4854 538475ca Iustin Pop
4855 538475ca Iustin Pop
    if self.op.iallocator is not None:
4856 538475ca Iustin Pop
      self._RunAllocator()
4857 0f1a06e3 Manuel Franceschini
4858 901a65c1 Iustin Pop
    #### node related checks
4859 901a65c1 Iustin Pop
4860 901a65c1 Iustin Pop
    # check primary node
4861 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
4862 7baf741d Guido Trotter
    assert self.pnode is not None, \
4863 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
4864 7527a8a4 Iustin Pop
    if pnode.offline:
4865 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
4866 7527a8a4 Iustin Pop
                                 pnode.name)
4867 733a2b6a Iustin Pop
    if pnode.drained:
4868 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
4869 733a2b6a Iustin Pop
                                 pnode.name)
4870 7527a8a4 Iustin Pop
4871 901a65c1 Iustin Pop
    self.secondaries = []
4872 901a65c1 Iustin Pop
4873 901a65c1 Iustin Pop
    # mirror node verification
4874 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
4875 7baf741d Guido Trotter
      if self.op.snode is None:
4876 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
4877 3ecf6786 Iustin Pop
                                   " a mirror node")
4878 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
4879 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
4880 3ecf6786 Iustin Pop
                                   " the primary node.")
4881 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
4882 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
4883 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
4884 a8083063 Iustin Pop
4885 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
4886 6785674e Iustin Pop
4887 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
4888 08db7c5c Iustin Pop
                                self.disks)
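    # Rough illustration (hypothetical numbers): two 10240 MB disks on a
    # mirrored template need a bit over 20480 MB of free space in the volume
    # group of every node in nodenames, since a small per-disk metadata
    # overhead is added; templates that do not use LVM yield req_size None
    # and skip the check below.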
4889 ed1ebc60 Guido Trotter
4890 8d75db10 Iustin Pop
    # Check lv size requirements
4891 8d75db10 Iustin Pop
    if req_size is not None:
4892 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4893 72737a7f Iustin Pop
                                         self.op.hypervisor)
4894 8d75db10 Iustin Pop
      for node in nodenames:
4895 781de953 Iustin Pop
        info = nodeinfo[node]
4896 4c4e4e1e Iustin Pop
        info.Raise("Cannot get current information from node %s" % node)
4897 070e998b Iustin Pop
        info = info.payload
4898 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
4899 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
4900 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
4901 8d75db10 Iustin Pop
                                     " node %s" % node)
4902 070e998b Iustin Pop
        if req_size > vg_free:
4903 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
4904 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
4905 070e998b Iustin Pop
                                     (node, vg_free, req_size))
4906 ed1ebc60 Guido Trotter
4907 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
4908 6785674e Iustin Pop
4909 a8083063 Iustin Pop
    # os verification
4910 781de953 Iustin Pop
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
4911 4c4e4e1e Iustin Pop
    result.Raise("OS '%s' not in supported os list for primary node %s" %
4912 4c4e4e1e Iustin Pop
                 (self.op.os_type, pnode.name), prereq=True)
4913 a8083063 Iustin Pop
4914 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
4915 a8083063 Iustin Pop
4916 49ce1563 Iustin Pop
    # memory check on primary node
4917 49ce1563 Iustin Pop
    if self.op.start:
4918 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
4919 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
4920 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
4921 338e51e8 Iustin Pop
                           self.op.hypervisor)
4922 49ce1563 Iustin Pop
4923 08896026 Iustin Pop
    self.dry_run_result = list(nodenames)
4924 08896026 Iustin Pop
4925 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4926 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
4927 a8083063 Iustin Pop

4928 a8083063 Iustin Pop
    """
4929 a8083063 Iustin Pop
    instance = self.op.instance_name
4930 a8083063 Iustin Pop
    pnode_name = self.pnode.name
4931 a8083063 Iustin Pop
4932 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
4933 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
4934 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
4935 2a6469d5 Alexander Schreiber
    else:
4936 2a6469d5 Alexander Schreiber
      network_port = None
4937 58acb49d Alexander Schreiber
4938 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
4939 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4940 31a853d2 Iustin Pop
4941 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
4942 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
4943 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
4944 2c313123 Manuel Franceschini
    else:
4945 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
4946 2c313123 Manuel Franceschini
4947 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
4948 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
4949 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
4950 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
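    # Illustration with hypothetical paths: a cluster file storage dir of
    # /srv/ganeti/file-storage, an opcode file_storage_dir of "mydir" and
    # instance "inst1.example.com" result in
    # /srv/ganeti/file-storage/mydir/inst1.example.com as the final path.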
4951 0f1a06e3 Manuel Franceschini
4952 0f1a06e3 Manuel Franceschini
4953 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
4954 a8083063 Iustin Pop
                                  self.op.disk_template,
4955 a8083063 Iustin Pop
                                  instance, pnode_name,
4956 08db7c5c Iustin Pop
                                  self.secondaries,
4957 08db7c5c Iustin Pop
                                  self.disks,
4958 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
4959 e2a65344 Iustin Pop
                                  self.op.file_driver,
4960 e2a65344 Iustin Pop
                                  0)
4961 a8083063 Iustin Pop
4962 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
4963 a8083063 Iustin Pop
                            primary_node=pnode_name,
4964 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
4965 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
4966 4978db17 Iustin Pop
                            admin_up=False,
4967 58acb49d Alexander Schreiber
                            network_port=network_port,
4968 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
4969 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
4970 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
4971 a8083063 Iustin Pop
                            )
4972 a8083063 Iustin Pop
4973 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
4974 796cab27 Iustin Pop
    try:
4975 796cab27 Iustin Pop
      _CreateDisks(self, iobj)
4976 796cab27 Iustin Pop
    except errors.OpExecError:
4977 796cab27 Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
4978 796cab27 Iustin Pop
      try:
4979 796cab27 Iustin Pop
        _RemoveDisks(self, iobj)
4980 796cab27 Iustin Pop
      finally:
4981 796cab27 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance)
4982 796cab27 Iustin Pop
        raise
4983 a8083063 Iustin Pop
4984 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
4985 a8083063 Iustin Pop
4986 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
4987 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
4988 7baf741d Guido Trotter
    # added the instance to the config
4989 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
4990 e36e96b4 Guido Trotter
    # Unlock all the nodes
4991 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
4992 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
4993 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
4994 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
4995 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
4996 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
4997 9c8971d7 Guido Trotter
    else:
4998 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
4999 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
5000 a8083063 Iustin Pop
5001 a8083063 Iustin Pop
    if self.op.wait_for_sync:
5002 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
5003 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
5004 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
5005 a8083063 Iustin Pop
      time.sleep(15)
5006 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
5007 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
5008 a8083063 Iustin Pop
    else:
5009 a8083063 Iustin Pop
      disk_abort = False
5010 a8083063 Iustin Pop
5011 a8083063 Iustin Pop
    if disk_abort:
5012 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
5013 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
5014 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
5015 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
5016 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
5017 3ecf6786 Iustin Pop
                               " this instance")
5018 a8083063 Iustin Pop
5019 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
5020 a8083063 Iustin Pop
                (instance, pnode_name))
5021 a8083063 Iustin Pop
5022 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
5023 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
5024 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
5025 e557bae9 Guido Trotter
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
5026 4c4e4e1e Iustin Pop
        result.Raise("Could not add os for instance %s"
5027 4c4e4e1e Iustin Pop
                     " on node %s" % (instance, pnode_name))
5028 a8083063 Iustin Pop
5029 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
5030 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
5031 a8083063 Iustin Pop
        src_node = self.op.src_node
5032 09acf207 Guido Trotter
        src_images = self.src_images
5033 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
5034 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
5035 09acf207 Guido Trotter
                                                         src_node, src_images,
5036 6c0af70e Guido Trotter
                                                         cluster_name)
5037 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
5038 944bf548 Iustin Pop
        if msg:
5039 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
5040 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
5041 a8083063 Iustin Pop
      else:
5042 a8083063 Iustin Pop
        # also checked in the prereq part
5043 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
5044 3ecf6786 Iustin Pop
                                     % self.op.mode)
5045 a8083063 Iustin Pop
5046 a8083063 Iustin Pop
    if self.op.start:
5047 4978db17 Iustin Pop
      iobj.admin_up = True
5048 4978db17 Iustin Pop
      self.cfg.Update(iobj)
5049 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
5050 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
5051 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
5052 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
5053 a8083063 Iustin Pop
5054 08896026 Iustin Pop
    return list(iobj.all_nodes)
5055 08896026 Iustin Pop
5056 a8083063 Iustin Pop
5057 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
5058 a8083063 Iustin Pop
  """Connect to an instance's console.
5059 a8083063 Iustin Pop

5060 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
5061 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
5062 a8083063 Iustin Pop
  console.
5063 a8083063 Iustin Pop

5064 a8083063 Iustin Pop
  """
5065 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
5066 8659b73e Guido Trotter
  REQ_BGL = False
5067 8659b73e Guido Trotter
5068 8659b73e Guido Trotter
  def ExpandNames(self):
5069 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
5070 a8083063 Iustin Pop
5071 a8083063 Iustin Pop
  def CheckPrereq(self):
5072 a8083063 Iustin Pop
    """Check prerequisites.
5073 a8083063 Iustin Pop

5074 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
5075 a8083063 Iustin Pop

5076 a8083063 Iustin Pop
    """
5077 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5078 8659b73e Guido Trotter
    assert self.instance is not None, \
5079 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5080 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
5081 a8083063 Iustin Pop
5082 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5083 a8083063 Iustin Pop
    """Connect to the console of an instance
5084 a8083063 Iustin Pop

5085 a8083063 Iustin Pop
    """
5086 a8083063 Iustin Pop
    instance = self.instance
5087 a8083063 Iustin Pop
    node = instance.primary_node
5088 a8083063 Iustin Pop
5089 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
5090 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
5091 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
5092 a8083063 Iustin Pop
5093 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
5094 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
5095 a8083063 Iustin Pop
5096 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
5097 a8083063 Iustin Pop
5098 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
5099 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
5100 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
5101 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
5102 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
5103 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
5104 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
5105 b047857b Michael Hanselmann
5106 82122173 Iustin Pop
    # build ssh cmdline
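    # Illustrative only: the returned argv usually resembles
    #   ssh -t ... root@<primary node> '<hypervisor console command>'
    # and is meant to be executed as-is on the master node.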
5107 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
5108 a8083063 Iustin Pop
5109 a8083063 Iustin Pop
5110 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
5111 a8083063 Iustin Pop
  """Replace the disks of an instance.
5112 a8083063 Iustin Pop

5113 a8083063 Iustin Pop
  """
5114 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
5115 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5116 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
5117 efd990e4 Guido Trotter
  REQ_BGL = False
5118 efd990e4 Guido Trotter
5119 7e9366f7 Iustin Pop
  def CheckArguments(self):
5120 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
5121 efd990e4 Guido Trotter
      self.op.remote_node = None
5122 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
5123 7e9366f7 Iustin Pop
      self.op.iallocator = None
5124 7e9366f7 Iustin Pop
5125 2bb5c911 Michael Hanselmann
    _DiskReplacer.CheckArguments(self.op.mode, self.op.remote_node,
5126 2bb5c911 Michael Hanselmann
                                 self.op.iallocator)
5127 7e9366f7 Iustin Pop
5128 7e9366f7 Iustin Pop
  def ExpandNames(self):
5129 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
5130 7e9366f7 Iustin Pop
5131 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
5132 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5133 2bb5c911 Michael Hanselmann
5134 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
5135 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
5136 efd990e4 Guido Trotter
      if remote_node is None:
5137 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
5138 efd990e4 Guido Trotter
                                   self.op.remote_node)
5139 2bb5c911 Michael Hanselmann
5140 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
5141 2bb5c911 Michael Hanselmann
5142 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
5143 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
5144 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
5145 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
5146 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
5147 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5148 2bb5c911 Michael Hanselmann
5149 efd990e4 Guido Trotter
    else:
5150 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
5151 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
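    # Locking summary: with an iallocator every node is locked (the new
    # secondary is not known yet); with an explicit remote node that node is
    # locked now and the instance's own nodes are appended later; otherwise
    # only the instance's own nodes are locked (LOCKS_REPLACE).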
5152 efd990e4 Guido Trotter
5153 2bb5c911 Michael Hanselmann
    self.replacer = _DiskReplacer(self, self.op.instance_name, self.op.mode,
5154 2bb5c911 Michael Hanselmann
                                  self.op.iallocator, self.op.remote_node,
5155 2bb5c911 Michael Hanselmann
                                  self.op.disks)
5156 2bb5c911 Michael Hanselmann
5157 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
5158 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
5159 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
5160 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
5161 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
5162 efd990e4 Guido Trotter
      self._LockInstancesNodes()
5163 a8083063 Iustin Pop
5164 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5165 a8083063 Iustin Pop
    """Build hooks env.
5166 a8083063 Iustin Pop

5167 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
5168 a8083063 Iustin Pop

5169 a8083063 Iustin Pop
    """
5170 2bb5c911 Michael Hanselmann
    instance = self.replacer.instance
5171 a8083063 Iustin Pop
    env = {
5172 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
5173 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
5174 2bb5c911 Michael Hanselmann
      "OLD_SECONDARY": instance.secondary_nodes[0],
5175 a8083063 Iustin Pop
      }
5176 2bb5c911 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self, instance))
5177 0834c866 Iustin Pop
    nl = [
5178 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
5179 2bb5c911 Michael Hanselmann
      instance.primary_node,
5180 0834c866 Iustin Pop
      ]
5181 0834c866 Iustin Pop
    if self.op.remote_node is not None:
5182 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
5183 a8083063 Iustin Pop
    return env, nl, nl
5184 a8083063 Iustin Pop
5185 a8083063 Iustin Pop
  def CheckPrereq(self):
5186 a8083063 Iustin Pop
    """Check prerequisites.
5187 a8083063 Iustin Pop

5188 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
5189 a8083063 Iustin Pop

5190 a8083063 Iustin Pop
    """
5191 2bb5c911 Michael Hanselmann
    self.replacer.CheckPrereq()
5192 a8083063 Iustin Pop
5193 2bb5c911 Michael Hanselmann
  def Exec(self, feedback_fn):
5194 2bb5c911 Michael Hanselmann
    """Execute disk replacement.
5195 2bb5c911 Michael Hanselmann

5196 2bb5c911 Michael Hanselmann
    This dispatches the disk replacement to the appropriate handler.
5197 2bb5c911 Michael Hanselmann

5198 2bb5c911 Michael Hanselmann
    """
5199 2bb5c911 Michael Hanselmann
    self.replacer.Exec()
5200 2bb5c911 Michael Hanselmann
5201 2bb5c911 Michael Hanselmann
5202 2bb5c911 Michael Hanselmann
class _DiskReplacer:
5203 2bb5c911 Michael Hanselmann
  """Replaces disks for an instance.
5204 2bb5c911 Michael Hanselmann

5205 2bb5c911 Michael Hanselmann
  Note: Locking is not within the scope of this class.
5206 2bb5c911 Michael Hanselmann

5207 2bb5c911 Michael Hanselmann
  """
5208 2bb5c911 Michael Hanselmann
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
5209 2bb5c911 Michael Hanselmann
               disks):
5210 2bb5c911 Michael Hanselmann
    """Initializes this class.
5211 2bb5c911 Michael Hanselmann

5212 2bb5c911 Michael Hanselmann
    """
5213 2bb5c911 Michael Hanselmann
    # Parameters
5214 2bb5c911 Michael Hanselmann
    self.lu = lu
5215 2bb5c911 Michael Hanselmann
    self.instance_name = instance_name
5216 2bb5c911 Michael Hanselmann
    self.mode = mode
5217 2bb5c911 Michael Hanselmann
    self.iallocator_name = iallocator_name
5218 2bb5c911 Michael Hanselmann
    self.remote_node = remote_node
5219 2bb5c911 Michael Hanselmann
    self.disks = disks
5220 2bb5c911 Michael Hanselmann
5221 2bb5c911 Michael Hanselmann
    # Shortcuts
5222 2bb5c911 Michael Hanselmann
    self.cfg = lu.cfg
5223 2bb5c911 Michael Hanselmann
    self.rpc = lu.rpc
5224 2bb5c911 Michael Hanselmann
5225 2bb5c911 Michael Hanselmann
    # Runtime data
5226 2bb5c911 Michael Hanselmann
    self.instance = None
5227 2bb5c911 Michael Hanselmann
    self.new_node = None
5228 2bb5c911 Michael Hanselmann
    self.target_node = None
5229 2bb5c911 Michael Hanselmann
    self.other_node = None
5230 2bb5c911 Michael Hanselmann
    self.remote_node_info = None
5231 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = None
5232 2bb5c911 Michael Hanselmann
5233 2bb5c911 Michael Hanselmann
  @staticmethod
5234 2bb5c911 Michael Hanselmann
  def CheckArguments(mode, remote_node, iallocator):
5235 2bb5c911 Michael Hanselmann
    # check for valid parameter combination
5236 2bb5c911 Michael Hanselmann
    cnt = [remote_node, iallocator].count(None)
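    # Accepted combinations: REPLACE_DISK_CHG needs exactly one of
    # remote_node / iallocator; the other replace modes take neither.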
5237 2bb5c911 Michael Hanselmann
    if mode == constants.REPLACE_DISK_CHG:
5238 2bb5c911 Michael Hanselmann
      if cnt == 2:
5239 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("When changing the secondary either an"
5240 2bb5c911 Michael Hanselmann
                                   " iallocator script must be used or the"
5241 2bb5c911 Michael Hanselmann
                                   " new node given")
5242 2bb5c911 Michael Hanselmann
      elif cnt == 0:
5243 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("Give either the iallocator or the new"
5244 2bb5c911 Michael Hanselmann
                                   " secondary, not both")
5245 2bb5c911 Michael Hanselmann
    else: # not replacing the secondary
5246 2bb5c911 Michael Hanselmann
      if cnt != 2:
5247 2bb5c911 Michael Hanselmann
        raise errors.OpPrereqError("The iallocator and new node options can"
5248 2bb5c911 Michael Hanselmann
                                   " be used only when changing the"
5249 2bb5c911 Michael Hanselmann
                                   " secondary node")
5250 2bb5c911 Michael Hanselmann
5251 2bb5c911 Michael Hanselmann
  @staticmethod
5252 2bb5c911 Michael Hanselmann
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
5253 2bb5c911 Michael Hanselmann
    """Compute a new secondary node using an IAllocator.
5254 2bb5c911 Michael Hanselmann

5255 2bb5c911 Michael Hanselmann
    """
5256 2bb5c911 Michael Hanselmann
    ial = IAllocator(lu.cfg, lu.rpc,
5257 2bb5c911 Michael Hanselmann
                     mode=constants.IALLOCATOR_MODE_RELOC,
5258 2bb5c911 Michael Hanselmann
                     name=instance_name,
5259 2bb5c911 Michael Hanselmann
                     relocate_from=relocate_from)
5260 2bb5c911 Michael Hanselmann
5261 2bb5c911 Michael Hanselmann
    ial.Run(iallocator_name)
5262 2bb5c911 Michael Hanselmann
5263 2bb5c911 Michael Hanselmann
    if not ial.success:
5264 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
5265 2bb5c911 Michael Hanselmann
                                 " %s" % (iallocator_name, ial.info))
5266 2bb5c911 Michael Hanselmann
5267 2bb5c911 Michael Hanselmann
    if len(ial.nodes) != ial.required_nodes:
5268 2bb5c911 Michael Hanselmann
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5269 2bb5c911 Michael Hanselmann
                                 " of nodes (%s), required %s" %
5270 2bb5c911 Michael Hanselmann
                                 (iallocator_name, len(ial.nodes),
                                  ial.required_nodes))
5271 2bb5c911 Michael Hanselmann
5272 2bb5c911 Michael Hanselmann
    remote_node_name = ial.nodes[0]
5273 2bb5c911 Michael Hanselmann
5274 2bb5c911 Michael Hanselmann
    lu.LogInfo("Selected new secondary for instance '%s': %s",
5275 2bb5c911 Michael Hanselmann
               instance_name, remote_node_name)
5276 2bb5c911 Michael Hanselmann
5277 2bb5c911 Michael Hanselmann
    return remote_node_name
5278 2bb5c911 Michael Hanselmann
5279 2bb5c911 Michael Hanselmann
  def CheckPrereq(self):
5280 2bb5c911 Michael Hanselmann
    """Check prerequisites.
5281 2bb5c911 Michael Hanselmann

5282 2bb5c911 Michael Hanselmann
    This checks that the instance is in the cluster.
5283 2bb5c911 Michael Hanselmann

5284 2bb5c911 Michael Hanselmann
    """
5285 2bb5c911 Michael Hanselmann
    self.instance = self.cfg.GetInstanceInfo(self.instance_name)
5286 2bb5c911 Michael Hanselmann
    assert self.instance is not None, \
5287 2bb5c911 Michael Hanselmann
      "Cannot retrieve locked instance %s" % self.instance_name
5288 2bb5c911 Michael Hanselmann
5289 2bb5c911 Michael Hanselmann
    if self.instance.disk_template != constants.DT_DRBD8:
5290 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
5291 7e9366f7 Iustin Pop
                                 " instances")
5292 a8083063 Iustin Pop
5293 2bb5c911 Michael Hanselmann
    if len(self.instance.secondary_nodes) != 1:
5294 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
5295 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
5296 2bb5c911 Michael Hanselmann
                                 len(self.instance.secondary_nodes))
5297 a8083063 Iustin Pop
5298 2bb5c911 Michael Hanselmann
    secondary_node = self.instance.secondary_nodes[0]
5299 a9e0c397 Iustin Pop
5300 2bb5c911 Michael Hanselmann
    if self.iallocator_name is None:
5301 2bb5c911 Michael Hanselmann
      remote_node = self.remote_node
5302 2bb5c911 Michael Hanselmann
    else:
5303 2bb5c911 Michael Hanselmann
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
5304 2bb5c911 Michael Hanselmann
                                       self.instance.name, secondary_node)
5305 b6e82a65 Iustin Pop
5306 a9e0c397 Iustin Pop
    if remote_node is not None:
5307 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
5308 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
5309 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
5310 a9e0c397 Iustin Pop
    else:
5311 a9e0c397 Iustin Pop
      self.remote_node_info = None
5312 2bb5c911 Michael Hanselmann
5313 2bb5c911 Michael Hanselmann
    if remote_node == self.instance.primary_node:
5314 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
5315 3ecf6786 Iustin Pop
                                 " the instance.")
5316 2bb5c911 Michael Hanselmann
5317 2bb5c911 Michael Hanselmann
    if remote_node == secondary_node:
5318 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
5319 7e9366f7 Iustin Pop
                                 " secondary node of the instance.")
5320 7e9366f7 Iustin Pop
5321 2bb5c911 Michael Hanselmann
    if self.mode == constants.REPLACE_DISK_PRI:
5322 2bb5c911 Michael Hanselmann
      self.target_node = self.instance.primary_node
5323 2bb5c911 Michael Hanselmann
      self.other_node = secondary_node
5324 2bb5c911 Michael Hanselmann
      check_nodes = [self.target_node, self.other_node]
5325 7e9366f7 Iustin Pop
5326 2bb5c911 Michael Hanselmann
    elif self.mode == constants.REPLACE_DISK_SEC:
5327 2bb5c911 Michael Hanselmann
      self.target_node = secondary_node
5328 2bb5c911 Michael Hanselmann
      self.other_node = self.instance.primary_node
5329 2bb5c911 Michael Hanselmann
      check_nodes = [self.target_node, self.other_node]
5330 a9e0c397 Iustin Pop
5331 2bb5c911 Michael Hanselmann
    elif self.mode == constants.REPLACE_DISK_CHG:
5332 2bb5c911 Michael Hanselmann
      self.new_node = remote_node
5333 2bb5c911 Michael Hanselmann
      self.other_node = self.instance.primary_node
5334 2bb5c911 Michael Hanselmann
      self.target_node = secondary_node
5335 2bb5c911 Michael Hanselmann
      check_nodes = [self.new_node, self.other_node]
5336 54155f52 Iustin Pop
5337 2bb5c911 Michael Hanselmann
      _CheckNodeNotDrained(self.lu, remote_node)
5338 a8083063 Iustin Pop
5339 2bb5c911 Michael Hanselmann
    else:
5340 2bb5c911 Michael Hanselmann
      raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
5341 2bb5c911 Michael Hanselmann
                                   self.mode)
5342 a9e0c397 Iustin Pop
5343 2bb5c911 Michael Hanselmann
    for node in check_nodes:
5344 2bb5c911 Michael Hanselmann
      _CheckNodeOnline(self.lu, node)
5345 e4376078 Iustin Pop
5346 2bb5c911 Michael Hanselmann
    # If not specified all disks should be replaced
5347 2bb5c911 Michael Hanselmann
    if not self.disks:
5348 2bb5c911 Michael Hanselmann
      self.disks = range(len(self.instance.disks))
5349 e4376078 Iustin Pop
5350 2bb5c911 Michael Hanselmann
    # Check whether disks are valid
5351 2bb5c911 Michael Hanselmann
    for disk_idx in self.disks:
5352 2bb5c911 Michael Hanselmann
      self.instance.FindDisk(disk_idx)
5353 e4376078 Iustin Pop
5354 2bb5c911 Michael Hanselmann
    # Get secondary node IP addresses
5355 2bb5c911 Michael Hanselmann
    node_2nd_ip = {}
5356 e4376078 Iustin Pop
5357 2bb5c911 Michael Hanselmann
    for node_name in [self.target_node, self.other_node, self.new_node]:
5358 2bb5c911 Michael Hanselmann
      if node_name is not None:
5359 2bb5c911 Michael Hanselmann
        node_2nd_ip[node_name] = self.cfg.GetNodeInfo(node_name).secondary_ip
5360 e4376078 Iustin Pop
5361 2bb5c911 Michael Hanselmann
    self.node_secondary_ip = node_2nd_ip
5362 a9e0c397 Iustin Pop
5363 2bb5c911 Michael Hanselmann
  def Exec(self):
5364 2bb5c911 Michael Hanselmann
    """Execute disk replacement.
5365 2bb5c911 Michael Hanselmann

5366 2bb5c911 Michael Hanselmann
    This dispatches the disk replacement to the appropriate handler.
5367 cff90b79 Iustin Pop

5368 a9e0c397 Iustin Pop
    """
5369 2bb5c911 Michael Hanselmann
    activate_disks = (not self.instance.admin_up)
5370 2bb5c911 Michael Hanselmann
5371 2bb5c911 Michael Hanselmann
    # Activate the instance disks if we're replacing them on a down instance
5372 2bb5c911 Michael Hanselmann
    if activate_disks:
5373 2bb5c911 Michael Hanselmann
      _StartInstanceDisks(self.lu, self.instance, True)
5374 2bb5c911 Michael Hanselmann
5375 2bb5c911 Michael Hanselmann
    try:
5376 2bb5c911 Michael Hanselmann
      if self.mode == constants.REPLACE_DISK_CHG:
5377 2bb5c911 Michael Hanselmann
        return self._ExecDrbd8Secondary()
5378 2bb5c911 Michael Hanselmann
      else:
5379 2bb5c911 Michael Hanselmann
        return self._ExecDrbd8DiskOnly()
5380 2bb5c911 Michael Hanselmann
5381 2bb5c911 Michael Hanselmann
    finally:
5382 2bb5c911 Michael Hanselmann
      # Deactivate the instance disks if we're replacing them on a
      # down instance
5383 2bb5c911 Michael Hanselmann
      if activate_disks:
5384 2bb5c911 Michael Hanselmann
        _SafeShutdownInstanceDisks(self.lu, self.instance)
5385 2bb5c911 Michael Hanselmann
5386 2bb5c911 Michael Hanselmann
  def _CheckVolumeGroup(self, nodes):
5387 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Checking volume groups")
5388 2bb5c911 Michael Hanselmann
5389 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
5390 cff90b79 Iustin Pop
5391 2bb5c911 Michael Hanselmann
    # Make sure volume group exists on all involved nodes
5392 2bb5c911 Michael Hanselmann
    results = self.rpc.call_vg_list(nodes)
5393 cff90b79 Iustin Pop
    if not results:
5394 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
5395 2bb5c911 Michael Hanselmann
5396 2bb5c911 Michael Hanselmann
    for node in nodes:
5397 781de953 Iustin Pop
      res = results[node]
5398 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
5399 2bb5c911 Michael Hanselmann
      if vgname not in res.payload:
5400 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
5401 2bb5c911 Michael Hanselmann
                                 (vgname, node))
5402 2bb5c911 Michael Hanselmann
5403 2bb5c911 Michael Hanselmann
  def _CheckDisksExistence(self, nodes):
5404 2bb5c911 Michael Hanselmann
    # Check disk existence
5405 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
5406 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
5407 cff90b79 Iustin Pop
        continue
5408 2bb5c911 Michael Hanselmann
5409 2bb5c911 Michael Hanselmann
      for node in nodes:
5410 2bb5c911 Michael Hanselmann
        self.lu.LogInfo("Checking disk/%d on %s" % (idx, node))
5411 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(dev, node)
5412 2bb5c911 Michael Hanselmann
5413 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
5414 2bb5c911 Michael Hanselmann
5415 4c4e4e1e Iustin Pop
        msg = result.fail_msg
5416 2bb5c911 Michael Hanselmann
        if msg or not result.payload:
5417 2bb5c911 Michael Hanselmann
          if not msg:
5418 2bb5c911 Michael Hanselmann
            msg = "disk not found"
5419 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5420 23829f6f Iustin Pop
                                   (idx, node, msg))
5421 cff90b79 Iustin Pop
5422 2bb5c911 Michael Hanselmann
  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
5423 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
5424 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
5425 cff90b79 Iustin Pop
        continue
5426 cff90b79 Iustin Pop
5427 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
5428 2bb5c911 Michael Hanselmann
                      (idx, node_name))
5429 2bb5c911 Michael Hanselmann
5430 2bb5c911 Michael Hanselmann
      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
5431 2bb5c911 Michael Hanselmann
                                   ldisk=ldisk):
5432 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
5433 2bb5c911 Michael Hanselmann
                                 " replace disks for instance %s" %
5434 2bb5c911 Michael Hanselmann
                                 (node_name, self.instance.name))
5435 2bb5c911 Michael Hanselmann
5436 2bb5c911 Michael Hanselmann
  def _CreateNewStorage(self, node_name):
5437 2bb5c911 Michael Hanselmann
    vgname = self.cfg.GetVGName()
5438 2bb5c911 Michael Hanselmann
    iv_names = {}
5439 2bb5c911 Michael Hanselmann
5440 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
5441 2bb5c911 Michael Hanselmann
      if idx not in self.disks:
5442 a9e0c397 Iustin Pop
        continue
5443 2bb5c911 Michael Hanselmann
5444 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding storage on %s for disk/%d" % (node_name, idx))
5445 2bb5c911 Michael Hanselmann
5446 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
5447 2bb5c911 Michael Hanselmann
5448 2bb5c911 Michael Hanselmann
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
5449 2bb5c911 Michael Hanselmann
      names = _GenerateUniqueNames(self.lu, lv_names)
5450 2bb5c911 Michael Hanselmann
5451 2bb5c911 Michael Hanselmann
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
5452 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
5453 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5454 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
5455 2bb5c911 Michael Hanselmann
5456 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
5457 a9e0c397 Iustin Pop
      old_lvs = dev.children
5458 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
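      # iv_names thus maps each disk's iv_name (e.g. "disk/0") to the
      # (drbd device, old LV children, freshly created LVs) triple used by
      # the later rename/attach and cleanup steps.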
5459 2bb5c911 Michael Hanselmann
5460 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
5461 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
5462 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, node_name, self.instance, new_lv, True,
5463 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
5464 2bb5c911 Michael Hanselmann
5465 2bb5c911 Michael Hanselmann
    return iv_names
5466 2bb5c911 Michael Hanselmann
5467 2bb5c911 Michael Hanselmann
  def _CheckDevices(self, node_name, iv_names):
5468 2bb5c911 Michael Hanselmann
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5469 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, node_name)
5470 2bb5c911 Michael Hanselmann
5471 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_find(node_name, dev)
5472 2bb5c911 Michael Hanselmann
5473 2bb5c911 Michael Hanselmann
      msg = result.fail_msg
5474 2bb5c911 Michael Hanselmann
      if msg or not result.payload:
5475 2bb5c911 Michael Hanselmann
        if not msg:
5476 2bb5c911 Michael Hanselmann
          msg = "disk not found"
5477 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
5478 2bb5c911 Michael Hanselmann
                                 (name, msg))
5479 2bb5c911 Michael Hanselmann
5480 2bb5c911 Michael Hanselmann
      if result.payload[5]:
5481 2bb5c911 Michael Hanselmann
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
5482 2bb5c911 Michael Hanselmann
5483 2bb5c911 Michael Hanselmann
  def _RemoveOldStorage(self, node_name, iv_names):
5484 2bb5c911 Michael Hanselmann
    for name, (dev, old_lvs, _) in iv_names.iteritems():
5485 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Remove logical volumes for %s" % name)
5486 2bb5c911 Michael Hanselmann
5487 2bb5c911 Michael Hanselmann
      for lv in old_lvs:
5488 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(lv, node_name)
5489 2bb5c911 Michael Hanselmann
5490 2bb5c911 Michael Hanselmann
        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
5491 2bb5c911 Michael Hanselmann
        if msg:
5492 2bb5c911 Michael Hanselmann
          self.lu.LogWarning("Can't remove old LV: %s" % msg,
5493 2bb5c911 Michael Hanselmann
                             hint="remove unused LVs manually")
5494 2bb5c911 Michael Hanselmann
5495 2bb5c911 Michael Hanselmann
  def _ExecDrbd8DiskOnly(self):
5496 2bb5c911 Michael Hanselmann
    """Replace a disk on the primary or secondary for DRBD 8.
5497 2bb5c911 Michael Hanselmann

5498 2bb5c911 Michael Hanselmann
    The algorithm for replace is quite complicated:
5499 2bb5c911 Michael Hanselmann

5500 2bb5c911 Michael Hanselmann
      1. for each disk to be replaced:
5501 2bb5c911 Michael Hanselmann

5502 2bb5c911 Michael Hanselmann
        1. create new LVs on the target node with unique names
5503 2bb5c911 Michael Hanselmann
        1. detach old LVs from the drbd device
5504 2bb5c911 Michael Hanselmann
        1. rename old LVs to name_replaced.<time_t>
5505 2bb5c911 Michael Hanselmann
        1. rename new LVs to old LVs
5506 2bb5c911 Michael Hanselmann
        1. attach the new LVs (with the old names now) to the drbd device
5507 2bb5c911 Michael Hanselmann

5508 2bb5c911 Michael Hanselmann
      1. wait for sync across all devices
5509 2bb5c911 Michael Hanselmann

5510 2bb5c911 Michael Hanselmann
      1. for each modified disk:
5511 2bb5c911 Michael Hanselmann

5512 2bb5c911 Michael Hanselmann
        1. remove old LVs (which have the name name_replaces.<time_t>)
5513 2bb5c911 Michael Hanselmann

5514 2bb5c911 Michael Hanselmann
    Failures are not very well handled.
5515 2bb5c911 Michael Hanselmann

5516 2bb5c911 Michael Hanselmann
    """
5517 2bb5c911 Michael Hanselmann
    steps_total = 6
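    # The six LogStep phases below follow the docstring: device existence
    # and peer consistency checks, new storage allocation, the
    # detach/rename/attach pass, and finally the sync wait and removal of
    # the old LVs.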
5518 2bb5c911 Michael Hanselmann
5519 2bb5c911 Michael Hanselmann
    # Step: check device activation
5520 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
5521 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.other_node, self.target_node])
5522 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.target_node, self.other_node])
5523 2bb5c911 Michael Hanselmann
5524 2bb5c911 Michael Hanselmann
    # Step: check other node consistency
5525 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
5526 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.other_node,
5527 2bb5c911 Michael Hanselmann
                                self.other_node == self.instance.primary_node,
5528 2bb5c911 Michael Hanselmann
                                False)
5529 2bb5c911 Michael Hanselmann
5530 2bb5c911 Michael Hanselmann
    # Step: create new storage
5531 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
5532 2bb5c911 Michael Hanselmann
    iv_names = self._CreateNewStorage(self.target_node)
5533 a9e0c397 Iustin Pop
5534 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
5535 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
5536 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
5537 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)
5538 2bb5c911 Michael Hanselmann
5539 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
                                                     old_lvs)
5540 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
5541 2bb5c911 Michael Hanselmann
                   " %s for device %s" % (self.target_node, dev.iv_name))
5542 cff90b79 Iustin Pop
      #dev.children = []
5543 cff90b79 Iustin Pop
      #cfg.Update(instance)
5544 a9e0c397 Iustin Pop
5545 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
5546 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
5547 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
5548 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
5549 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
5550 cff90b79 Iustin Pop
5551 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
5552 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
5553 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
5554 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
5555 2bb5c911 Michael Hanselmann
5556 2bb5c911 Michael Hanselmann
      # Build the rename list based on what LVs exist on the node
5557 2bb5c911 Michael Hanselmann
      rename_old_to_new = []
5558 cff90b79 Iustin Pop
      for to_ren in old_lvs:
5559 2bb5c911 Michael Hanselmann
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
5560 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
5561 23829f6f Iustin Pop
          # device exists
5562 2bb5c911 Michael Hanselmann
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
5563 cff90b79 Iustin Pop
5564 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the old LVs on the target node")
5565 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node, rename_old_to_new)
5566 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename old LVs on node %s" % self.target_node)
5567 2bb5c911 Michael Hanselmann
5568 2bb5c911 Michael Hanselmann
      # Now we rename the new LVs to the old LVs
5569 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Renaming the new LVs on the target node")
5570 2bb5c911 Michael Hanselmann
      rename_new_to_old = [(new, old.physical_id)
5571 2bb5c911 Michael Hanselmann
                           for old, new in zip(old_lvs, new_lvs)]
5572 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_rename(self.target_node, rename_new_to_old)
5573 2bb5c911 Michael Hanselmann
      result.Raise("Can't rename new LVs on node %s" % self.target_node)
5574 cff90b79 Iustin Pop
5575 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
5576 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
5577 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(new, self.target_node)
5578 a9e0c397 Iustin Pop
5579 cff90b79 Iustin Pop
      for disk in old_lvs:
5580 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
5581 2bb5c911 Michael Hanselmann
        self.cfg.SetDiskID(disk, self.target_node)
5582 a9e0c397 Iustin Pop
5583 2bb5c911 Michael Hanselmann
      # Now that the new lvs have the old name, we can add them to the device
5584 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
5585 2bb5c911 Michael Hanselmann
      result = self.rpc.call_blockdev_addchildren(self.target_node, dev, new_lvs)
5586 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5587 2cc1da8b Iustin Pop
      if msg:
5588 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
5589 2bb5c911 Michael Hanselmann
          msg2 = self.rpc.call_blockdev_remove(self.target_node, new_lv).fail_msg
5590 4c4e4e1e Iustin Pop
          if msg2:
5591 2bb5c911 Michael Hanselmann
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
5592 2bb5c911 Michael Hanselmann
                               hint=("cleanup manually the unused logical"
5593 2bb5c911 Michael Hanselmann
                                     "volumes"))
5594 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
5595 a9e0c397 Iustin Pop
5596 a9e0c397 Iustin Pop
      dev.children = new_lvs
5597 a9e0c397 Iustin Pop
5598 2bb5c911 Michael Hanselmann
      self.cfg.Update(self.instance)
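      # Note: the configuration is updated once per disk, so failing on a
      # later disk does not leave this (already replaced) disk recorded
      # with its detached, renamed LVs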
5599 a9e0c397 Iustin Pop
5600 2bb5c911 Michael Hanselmann
    # Wait for sync
5601 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
5602 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
5603 2bb5c911 Michael Hanselmann
    self.lu.LogStep(5, steps_total, "Sync devices")
5604 2bb5c911 Michael Hanselmann
    _WaitForSync(self.lu, self.instance, unlock=True)
5605 a9e0c397 Iustin Pop
5606 2bb5c911 Michael Hanselmann
    # Check all devices manually
5607 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
5608 a9e0c397 Iustin Pop
5609 cff90b79 Iustin Pop
    # Step: remove old storage
5610 2bb5c911 Michael Hanselmann
    self.lu.LogStep(6, steps_total, "Removing old storage")
5611 2bb5c911 Michael Hanselmann
    self._RemoveOldStorage(self.target_node, iv_names)
5612 a9e0c397 Iustin Pop
5613 2bb5c911 Michael Hanselmann
  def _ExecDrbd8Secondary(self):
5614 2bb5c911 Michael Hanselmann
    """Replace the secondary node for DRBD 8.
5615 a9e0c397 Iustin Pop

5616 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5617 a9e0c397 Iustin Pop
      - for all disks of the instance:
5618 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
5619 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
5620 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
5621 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
5622 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
5623 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
5624 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
5625 a9e0c397 Iustin Pop
          not network enabled
5626 a9e0c397 Iustin Pop
      - wait for sync across all devices
5627 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
5628 a9e0c397 Iustin Pop

5629 a9e0c397 Iustin Pop
    Failures are not very well handled.
5630 0834c866 Iustin Pop

5631 a9e0c397 Iustin Pop
    """
5632 0834c866 Iustin Pop
    steps_total = 6
5633 0834c866 Iustin Pop
5634 0834c866 Iustin Pop
    # Step: check device activation
5635 2bb5c911 Michael Hanselmann
    self.lu.LogStep(1, steps_total, "Check device existence")
5636 2bb5c911 Michael Hanselmann
    self._CheckDisksExistence([self.instance.primary_node])
5637 2bb5c911 Michael Hanselmann
    self._CheckVolumeGroup([self.instance.primary_node])
5638 0834c866 Iustin Pop
5639 0834c866 Iustin Pop
    # Step: check other node consistency
5640 2bb5c911 Michael Hanselmann
    self.lu.LogStep(2, steps_total, "Check peer consistency")
5641 2bb5c911 Michael Hanselmann
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
5642 0834c866 Iustin Pop
5643 0834c866 Iustin Pop
    # Step: create new storage
5644 2bb5c911 Michael Hanselmann
    self.lu.LogStep(3, steps_total, "Allocate new storage")
5645 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
5646 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
5647 2bb5c911 Michael Hanselmann
                      (self.new_node, idx))
5648 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
5649 a9e0c397 Iustin Pop
      for new_lv in dev.children:
5650 2bb5c911 Michael Hanselmann
        _CreateBlockDev(self.lu, self.new_node, self.instance, new_lv, True,
5651 2bb5c911 Michael Hanselmann
                        _GetInstanceInfoText(self.instance), False)
5652 a9e0c397 Iustin Pop
5653 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
5654 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
5655 a1578d63 Iustin Pop
    # error and the success paths
5656 2bb5c911 Michael Hanselmann
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
5657 2bb5c911 Michael Hanselmann
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
5658 2bb5c911 Michael Hanselmann
                                        self.instance.name)
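    # one DRBD minor was requested (and allocated) on the new node for each
    # instance disk; the zip() below pairs every disk with its new minor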
5659 2bb5c911 Michael Hanselmann
    logging.debug("Allocated minors %r" % (minors,))
5660 2bb5c911 Michael Hanselmann
5661 2bb5c911 Michael Hanselmann
    iv_names = {}
5662 2bb5c911 Michael Hanselmann
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
5663 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" % (self.new_node, idx))
5664 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
5665 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
5666 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
5667 a2d59d8b Iustin Pop
      # with network, for the later activation in step 4
5668 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5669 2bb5c911 Michael Hanselmann
      if self.instance.primary_node == o_node1:
5670 a2d59d8b Iustin Pop
        p_minor = o_minor1
5671 ffa1c0dc Iustin Pop
      else:
5672 a2d59d8b Iustin Pop
        p_minor = o_minor2
5673 a2d59d8b Iustin Pop
5674 2bb5c911 Michael Hanselmann
      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
5675 2bb5c911 Michael Hanselmann
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)
5676 a2d59d8b Iustin Pop
5677 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
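      # remember the drbd device, its local LV children and the
      # network-enabled logical id; the latter is what the disk is switched
      # to once the old drbds have been shut down (see "Updating instance
      # configuration" below)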
5678 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5679 a2d59d8b Iustin Pop
                    new_net_id)
5680 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5681 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
5682 8a6c7011 Iustin Pop
                              children=dev.children,
5683 8a6c7011 Iustin Pop
                              size=dev.size)
5684 796cab27 Iustin Pop
      try:
5685 2bb5c911 Michael Hanselmann
        _CreateSingleBlockDev(self.lu, self.new_node, self.instance, new_drbd,
5686 2bb5c911 Michael Hanselmann
                              _GetInstanceInfoText(self.instance), False)
5687 82759cb1 Iustin Pop
      except errors.GenericError:
5688 2bb5c911 Michael Hanselmann
        self.cfg.ReleaseDRBDMinors(self.instance.name)
5689 796cab27 Iustin Pop
        raise
5690 a9e0c397 Iustin Pop
5691 2bb5c911 Michael Hanselmann
    # We have new devices, shutdown the drbd on the old secondary
5692 2bb5c911 Michael Hanselmann
    for idx, dev in enumerate(self.instance.disks):
5693 2bb5c911 Michael Hanselmann
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node" % idx)
5694 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.target_node)
5695 2bb5c911 Michael Hanselmann
      msg = self.rpc.call_blockdev_shutdown(self.target_node, dev).fail_msg
5696 cacfd1fd Iustin Pop
      if msg:
5697 2bb5c911 Michael Hanselmann
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
5698 2bb5c911 Michael Hanselmann
                           "node: %s" % (idx, msg),
5699 2bb5c911 Michael Hanselmann
                           hint=("Please cleanup this device manually as"
5700 2bb5c911 Michael Hanselmann
                                 " soon as possible"))
5701 a9e0c397 Iustin Pop
5702 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
5703 2bb5c911 Michael Hanselmann
    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
                                               self.node_secondary_ip,
5704 2bb5c911 Michael Hanselmann
                                               self.instance.disks)[
                                                 self.instance.primary_node]
5705 642445d9 Iustin Pop
5706 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5707 a2d59d8b Iustin Pop
    if msg:
5708 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
5709 2bb5c911 Michael Hanselmann
      self.cfg.ReleaseDRBDMinors(self.instance.name)
5710 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
5711 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
5712 642445d9 Iustin Pop
5713 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
5714 642445d9 Iustin Pop
    # the instance to point to the new secondary
5715 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Updating instance configuration")
5716 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
5717 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
5718 2bb5c911 Michael Hanselmann
      self.cfg.SetDiskID(dev, self.instance.primary_node)
5719 2bb5c911 Michael Hanselmann
5720 2bb5c911 Michael Hanselmann
    self.cfg.Update(self.instance)
5721 a9e0c397 Iustin Pop
5722 642445d9 Iustin Pop
    # and now perform the drbd attach
5723 2bb5c911 Michael Hanselmann
    self.lu.LogInfo("Attaching primary drbds to new secondary"
5724 2bb5c911 Michael Hanselmann
                    " (standalone => connected)")
5725 2bb5c911 Michael Hanselmann
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
5726 2bb5c911 Michael Hanselmann
                                           self.instance.disks,
                                           self.instance.name,
5727 a2d59d8b Iustin Pop
                                           False)
5728 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
5729 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
5730 a2d59d8b Iustin Pop
      if msg:
5731 2bb5c911 Michael Hanselmann
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s", to_node, msg,
5732 2bb5c911 Michael Hanselmann
                           hint=("please do a gnt-instance info to see the"
5733 2bb5c911 Michael Hanselmann
                                 " status of disks"))
5734 a9e0c397 Iustin Pop
5735 2bb5c911 Michael Hanselmann
    # Wait for sync
5736 2bb5c911 Michael Hanselmann
    # This can fail as the old devices are degraded and _WaitForSync
5737 2bb5c911 Michael Hanselmann
    # does a combined result over all disks, so we don't check its return value
5738 2bb5c911 Michael Hanselmann
    self.lu.LogStep(5, steps_total, "Sync devices")
5739 2bb5c911 Michael Hanselmann
    _WaitForSync(self.lu, self.instance, unlock=True)
5740 a9e0c397 Iustin Pop
5741 2bb5c911 Michael Hanselmann
    # Check all devices manually
5742 2bb5c911 Michael Hanselmann
    self._CheckDevices(self.instance.primary_node, iv_names)
5743 22985314 Guido Trotter
5744 2bb5c911 Michael Hanselmann
    # Step: remove old storage
5745 2bb5c911 Michael Hanselmann
    self.lu.LogStep(6, steps_total, "Removing old storage")
5746 2bb5c911 Michael Hanselmann
    self._RemoveOldStorage(self.target_node, iv_names)
5747 a9e0c397 Iustin Pop
5748 a8083063 Iustin Pop
5749 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
5750 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
5751 8729e0d7 Iustin Pop

5752 8729e0d7 Iustin Pop
  """
5753 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
5754 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5755 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
5756 31e63dbf Guido Trotter
  REQ_BGL = False
5757 31e63dbf Guido Trotter
5758 31e63dbf Guido Trotter
  def ExpandNames(self):
5759 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
5760 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
5761 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5762 31e63dbf Guido Trotter
5763 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
5764 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
5765 31e63dbf Guido Trotter
      self._LockInstancesNodes()
5766 8729e0d7 Iustin Pop
5767 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
5768 8729e0d7 Iustin Pop
    """Build hooks env.
5769 8729e0d7 Iustin Pop

5770 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
5771 8729e0d7 Iustin Pop

5772 8729e0d7 Iustin Pop
    """
5773 8729e0d7 Iustin Pop
    env = {
5774 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
5775 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
5776 8729e0d7 Iustin Pop
      }
5777 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5778 8729e0d7 Iustin Pop
    nl = [
5779 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
5780 8729e0d7 Iustin Pop
      self.instance.primary_node,
5781 8729e0d7 Iustin Pop
      ]
5782 8729e0d7 Iustin Pop
    return env, nl, nl
5783 8729e0d7 Iustin Pop
5784 8729e0d7 Iustin Pop
  def CheckPrereq(self):
5785 8729e0d7 Iustin Pop
    """Check prerequisites.
5786 8729e0d7 Iustin Pop

5787 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
5788 8729e0d7 Iustin Pop

5789 8729e0d7 Iustin Pop
    """
5790 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5791 31e63dbf Guido Trotter
    assert instance is not None, \
5792 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5793 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
5794 6b12959c Iustin Pop
    for node in nodenames:
5795 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
5796 7527a8a4 Iustin Pop
5797 31e63dbf Guido Trotter
5798 8729e0d7 Iustin Pop
    self.instance = instance
5799 8729e0d7 Iustin Pop
5800 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
5801 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
5802 8729e0d7 Iustin Pop
                                 " growing.")
5803 8729e0d7 Iustin Pop
5804 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
5805 8729e0d7 Iustin Pop
5806 72737a7f Iustin Pop
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5807 72737a7f Iustin Pop
                                       instance.hypervisor)
5808 8729e0d7 Iustin Pop
    for node in nodenames:
5809 781de953 Iustin Pop
      info = nodeinfo[node]
5810 4c4e4e1e Iustin Pop
      info.Raise("Cannot get current information from node %s" % node)
5811 070e998b Iustin Pop
      vg_free = info.payload.get('vg_free', None)
5812 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
5813 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
5814 8729e0d7 Iustin Pop
                                   " node %s" % node)
5815 781de953 Iustin Pop
      if self.op.amount > vg_free:
5816 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
5817 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
5818 781de953 Iustin Pop
                                   (node, vg_free, self.op.amount))
5819 8729e0d7 Iustin Pop
5820 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
5821 8729e0d7 Iustin Pop
    """Execute disk grow.
5822 8729e0d7 Iustin Pop

5823 8729e0d7 Iustin Pop
    """
5824 8729e0d7 Iustin Pop
    instance = self.instance
5825 ad24e046 Iustin Pop
    disk = self.disk
5826 6b12959c Iustin Pop
    for node in instance.all_nodes:
5827 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
5828 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
5829 4c4e4e1e Iustin Pop
      result.Raise("Grow request failed to node %s" % node)
5830 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
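    # RecordGrow only updates the size stored in the disk object; the
    # on-node resize already happened via blockdev_grow above, and the
    # cfg.Update call below persists the new size in the configuration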
5831 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
5832 6605411d Iustin Pop
    if self.op.wait_for_sync:
5833 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
5834 6605411d Iustin Pop
      if disk_abort:
5835 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
5836 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
5837 8729e0d7 Iustin Pop
5838 8729e0d7 Iustin Pop
5839 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
5840 a8083063 Iustin Pop
  """Query runtime instance data.
5841 a8083063 Iustin Pop

5842 a8083063 Iustin Pop
  """
5843 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
5844 a987fa48 Guido Trotter
  REQ_BGL = False
5845 ae5849b5 Michael Hanselmann
5846 a987fa48 Guido Trotter
  def ExpandNames(self):
5847 a987fa48 Guido Trotter
    self.needed_locks = {}
5848 c772d142 Michael Hanselmann
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
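    # all locks are acquired in shared mode (1): this LU only reads data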
5849 a987fa48 Guido Trotter
5850 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
5851 a987fa48 Guido Trotter
      raise errors.OpPrereqError("Invalid argument type 'instances'")
5852 a987fa48 Guido Trotter
5853 a987fa48 Guido Trotter
    if self.op.instances:
5854 a987fa48 Guido Trotter
      self.wanted_names = []
5855 a987fa48 Guido Trotter
      for name in self.op.instances:
5856 a987fa48 Guido Trotter
        full_name = self.cfg.ExpandInstanceName(name)
5857 a987fa48 Guido Trotter
        if full_name is None:
5858 f57c76e4 Iustin Pop
          raise errors.OpPrereqError("Instance '%s' not known" % name)
5859 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
5860 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
5861 a987fa48 Guido Trotter
    else:
5862 a987fa48 Guido Trotter
      self.wanted_names = None
5863 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5864 a987fa48 Guido Trotter
5865 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
5866 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5867 a987fa48 Guido Trotter
5868 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
5869 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
5870 a987fa48 Guido Trotter
      self._LockInstancesNodes()
5871 a8083063 Iustin Pop
5872 a8083063 Iustin Pop
  def CheckPrereq(self):
5873 a8083063 Iustin Pop
    """Check prerequisites.
5874 a8083063 Iustin Pop

5875 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
5876 a8083063 Iustin Pop

5877 a8083063 Iustin Pop
    """
5878 a987fa48 Guido Trotter
    if self.wanted_names is None:
5879 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5880 a8083063 Iustin Pop
5881 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
5882 a987fa48 Guido Trotter
                             in self.wanted_names]
5883 a987fa48 Guido Trotter
    return
5884 a8083063 Iustin Pop
5885 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
5886 a8083063 Iustin Pop
    """Compute block device status.
5887 a8083063 Iustin Pop

5888 a8083063 Iustin Pop
    """
5889 57821cac Iustin Pop
    static = self.op.static
5890 57821cac Iustin Pop
    if not static:
5891 57821cac Iustin Pop
      self.cfg.SetDiskID(dev, instance.primary_node)
5892 57821cac Iustin Pop
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
5893 9854f5d0 Iustin Pop
      if dev_pstatus.offline:
5894 9854f5d0 Iustin Pop
        dev_pstatus = None
5895 9854f5d0 Iustin Pop
      else:
5896 4c4e4e1e Iustin Pop
        dev_pstatus.Raise("Can't compute disk status for %s" % instance.name)
5897 9854f5d0 Iustin Pop
        dev_pstatus = dev_pstatus.payload
5898 57821cac Iustin Pop
    else:
5899 57821cac Iustin Pop
      dev_pstatus = None
5900 57821cac Iustin Pop
5901 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
5902 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
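      # (for DRBD, logical_id is (node1, node2, port, minor1, minor2,
      # secret); whichever of the two nodes is not the primary is the
      # secondary we want to query)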
5903 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
5904 a8083063 Iustin Pop
        snode = dev.logical_id[1]
5905 a8083063 Iustin Pop
      else:
5906 a8083063 Iustin Pop
        snode = dev.logical_id[0]
5907 a8083063 Iustin Pop
5908 57821cac Iustin Pop
    if snode and not static:
5909 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
5910 72737a7f Iustin Pop
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
5911 9854f5d0 Iustin Pop
      if dev_sstatus.offline:
5912 9854f5d0 Iustin Pop
        dev_sstatus = None
5913 9854f5d0 Iustin Pop
      else:
5914 4c4e4e1e Iustin Pop
        dev_sstatus.Raise("Can't compute disk status for %s" % instance.name)
5915 9854f5d0 Iustin Pop
        dev_sstatus = dev_sstatus.payload
5916 a8083063 Iustin Pop
    else:
5917 a8083063 Iustin Pop
      dev_sstatus = None
5918 a8083063 Iustin Pop
5919 a8083063 Iustin Pop
    if dev.children:
5920 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
5921 a8083063 Iustin Pop
                      for child in dev.children]
5922 a8083063 Iustin Pop
    else:
5923 a8083063 Iustin Pop
      dev_children = []
5924 a8083063 Iustin Pop
5925 a8083063 Iustin Pop
    data = {
5926 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
5927 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
5928 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
5929 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
5930 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
5931 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
5932 a8083063 Iustin Pop
      "children": dev_children,
5933 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
5934 c98162a7 Iustin Pop
      "size": dev.size,
5935 a8083063 Iustin Pop
      }
5936 a8083063 Iustin Pop
5937 a8083063 Iustin Pop
    return data
5938 a8083063 Iustin Pop
5939 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5940 a8083063 Iustin Pop
    """Gather and return data"""
5941 a8083063 Iustin Pop
    result = {}
5942 338e51e8 Iustin Pop
5943 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
5944 338e51e8 Iustin Pop
5945 a8083063 Iustin Pop
    for instance in self.wanted_instances:
5946 57821cac Iustin Pop
      if not self.op.static:
5947 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
5948 57821cac Iustin Pop
                                                  instance.name,
5949 57821cac Iustin Pop
                                                  instance.hypervisor)
5950 4c4e4e1e Iustin Pop
        remote_info.Raise("Error checking node %s" % instance.primary_node)
5951 7ad1af4a Iustin Pop
        remote_info = remote_info.payload
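        # a non-empty payload is taken to mean the hypervisor reports the
        # instance as running on its primary node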
5952 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
5953 57821cac Iustin Pop
          remote_state = "up"
5954 57821cac Iustin Pop
        else:
5955 57821cac Iustin Pop
          remote_state = "down"
5956 a8083063 Iustin Pop
      else:
5957 57821cac Iustin Pop
        remote_state = None
5958 0d68c45d Iustin Pop
      if instance.admin_up:
5959 a8083063 Iustin Pop
        config_state = "up"
5960 0d68c45d Iustin Pop
      else:
5961 0d68c45d Iustin Pop
        config_state = "down"
5962 a8083063 Iustin Pop
5963 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
5964 a8083063 Iustin Pop
               for device in instance.disks]
5965 a8083063 Iustin Pop
5966 a8083063 Iustin Pop
      idict = {
5967 a8083063 Iustin Pop
        "name": instance.name,
5968 a8083063 Iustin Pop
        "config_state": config_state,
5969 a8083063 Iustin Pop
        "run_state": remote_state,
5970 a8083063 Iustin Pop
        "pnode": instance.primary_node,
5971 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
5972 a8083063 Iustin Pop
        "os": instance.os,
5973 0b13832c Guido Trotter
        # this happens to be the same format used for hooks
5974 0b13832c Guido Trotter
        "nics": _NICListToTuple(self, instance.nics),
5975 a8083063 Iustin Pop
        "disks": disks,
5976 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
5977 24838135 Iustin Pop
        "network_port": instance.network_port,
5978 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
5979 338e51e8 Iustin Pop
        "hv_actual": cluster.FillHV(instance),
5980 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
5981 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
5982 a8083063 Iustin Pop
        }
5983 a8083063 Iustin Pop
5984 a8083063 Iustin Pop
      result[instance.name] = idict
5985 a8083063 Iustin Pop
5986 a8083063 Iustin Pop
    return result
5987 a8083063 Iustin Pop
5988 a8083063 Iustin Pop
5989 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
5990 a8083063 Iustin Pop
  """Modifies an instances's parameters.
5991 a8083063 Iustin Pop

5992 a8083063 Iustin Pop
  """
5993 a8083063 Iustin Pop
  HPATH = "instance-modify"
5994 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5995 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
5996 1a5c7281 Guido Trotter
  REQ_BGL = False
5997 1a5c7281 Guido Trotter
5998 24991749 Iustin Pop
  def CheckArguments(self):
5999 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
6000 24991749 Iustin Pop
      self.op.nics = []
6001 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
6002 24991749 Iustin Pop
      self.op.disks = []
6003 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
6004 24991749 Iustin Pop
      self.op.beparams = {}
6005 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
6006 24991749 Iustin Pop
      self.op.hvparams = {}
6007 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
6008 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
6009 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
6010 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
6011 24991749 Iustin Pop
6012 24991749 Iustin Pop
    # Disk validation
6013 24991749 Iustin Pop
    disk_addremove = 0
6014 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6015 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6016 24991749 Iustin Pop
        disk_addremove += 1
6017 24991749 Iustin Pop
        continue
6018 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
6019 24991749 Iustin Pop
        disk_addremove += 1
6020 24991749 Iustin Pop
      else:
6021 24991749 Iustin Pop
        if not isinstance(disk_op, int):
6022 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
6023 8b46606c Guido Trotter
        if not isinstance(disk_dict, dict):
6024 8b46606c Guido Trotter
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
6025 8b46606c Guido Trotter
          raise errors.OpPrereqError(msg)
6026 8b46606c Guido Trotter
6027 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
6028 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
6029 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
6030 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
6031 24991749 Iustin Pop
        size = disk_dict.get('size', None)
6032 24991749 Iustin Pop
        if size is None:
6033 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
6034 24991749 Iustin Pop
        try:
6035 24991749 Iustin Pop
          size = int(size)
6036 24991749 Iustin Pop
        except ValueError, err:
6037 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
6038 24991749 Iustin Pop
                                     str(err))
6039 24991749 Iustin Pop
        disk_dict['size'] = size
6040 24991749 Iustin Pop
      else:
6041 24991749 Iustin Pop
        # modification of disk
6042 24991749 Iustin Pop
        if 'size' in disk_dict:
6043 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
6044 24991749 Iustin Pop
                                     " grow-disk")
6045 24991749 Iustin Pop
6046 24991749 Iustin Pop
    if disk_addremove > 1:
6047 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
6048 24991749 Iustin Pop
                                 " supported at a time")
6049 24991749 Iustin Pop
6050 24991749 Iustin Pop
    # NIC validation
6051 24991749 Iustin Pop
    nic_addremove = 0
6052 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6053 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6054 24991749 Iustin Pop
        nic_addremove += 1
6055 24991749 Iustin Pop
        continue
6056 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6057 24991749 Iustin Pop
        nic_addremove += 1
6058 24991749 Iustin Pop
      else:
6059 24991749 Iustin Pop
        if not isinstance(nic_op, int):
6060 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
6061 8b46606c Guido Trotter
        if not isinstance(nic_dict, dict):
6062 8b46606c Guido Trotter
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
6063 8b46606c Guido Trotter
          raise errors.OpPrereqError(msg)
6064 24991749 Iustin Pop
6065 24991749 Iustin Pop
      # nic_dict should be a dict
6066 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
6067 24991749 Iustin Pop
      if nic_ip is not None:
6068 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
6069 24991749 Iustin Pop
          nic_dict['ip'] = None
6070 24991749 Iustin Pop
        else:
6071 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
6072 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
6073 5c44da6a Guido Trotter
6074 cd098c41 Guido Trotter
      nic_bridge = nic_dict.get('bridge', None)
6075 cd098c41 Guido Trotter
      nic_link = nic_dict.get('link', None)
6076 cd098c41 Guido Trotter
      if nic_bridge and nic_link:
6077 29921401 Iustin Pop
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
6078 29921401 Iustin Pop
                                   " at the same time")
6079 cd098c41 Guido Trotter
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
6080 cd098c41 Guido Trotter
        nic_dict['bridge'] = None
6081 cd098c41 Guido Trotter
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
6082 cd098c41 Guido Trotter
        nic_dict['link'] = None
6083 cd098c41 Guido Trotter
6084 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
6085 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
6086 5c44da6a Guido Trotter
        if nic_mac is None:
6087 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
6088 5c44da6a Guido Trotter
6089 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6090 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6091 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6092 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
6093 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
6094 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
6095 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
6096 5c44da6a Guido Trotter
                                     " modifying an existing nic")
6097 5c44da6a Guido Trotter
6098 24991749 Iustin Pop
    if nic_addremove > 1:
6099 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
6100 24991749 Iustin Pop
                                 " supported at a time")
6101 24991749 Iustin Pop
6102 1a5c7281 Guido Trotter
  def ExpandNames(self):
6103 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
6104 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
6105 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6106 74409b12 Iustin Pop
6107 74409b12 Iustin Pop
  def DeclareLocks(self, level):
6108 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
6109 74409b12 Iustin Pop
      self._LockInstancesNodes()
6110 a8083063 Iustin Pop
6111 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6112 a8083063 Iustin Pop
    """Build hooks env.
6113 a8083063 Iustin Pop

6114 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
6115 a8083063 Iustin Pop

6116 a8083063 Iustin Pop
    """
6117 396e1b78 Michael Hanselmann
    args = dict()
6118 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
6119 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
6120 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
6121 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
6122 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
6123 d8dcf3c9 Guido Trotter
    # information at all.
6124 d8dcf3c9 Guido Trotter
    if self.op.nics:
6125 d8dcf3c9 Guido Trotter
      args['nics'] = []
6126 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
6127 62f0dd02 Guido Trotter
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
6128 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
6129 d8dcf3c9 Guido Trotter
        if idx in nic_override:
6130 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
6131 d8dcf3c9 Guido Trotter
        else:
6132 d8dcf3c9 Guido Trotter
          this_nic_override = {}
6133 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
6134 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
6135 d8dcf3c9 Guido Trotter
        else:
6136 d8dcf3c9 Guido Trotter
          ip = nic.ip
6137 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
6138 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
6139 d8dcf3c9 Guido Trotter
        else:
6140 d8dcf3c9 Guido Trotter
          mac = nic.mac
6141 62f0dd02 Guido Trotter
        if idx in self.nic_pnew:
6142 62f0dd02 Guido Trotter
          nicparams = self.nic_pnew[idx]
6143 62f0dd02 Guido Trotter
        else:
6144 62f0dd02 Guido Trotter
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
6145 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
6146 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
6147 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
6148 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
6149 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
6150 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
6151 62f0dd02 Guido Trotter
        nicparams = self.nic_pnew[constants.DDM_ADD]
6152 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
6153 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
6154 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
6155 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
6156 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
6157 d8dcf3c9 Guido Trotter
6158 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
6159 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6160 a8083063 Iustin Pop
    return env, nl, nl
6161 a8083063 Iustin Pop
6162 0329617a Guido Trotter
  def _GetUpdatedParams(self, old_params, update_dict,
6163 0329617a Guido Trotter
                        default_values, parameter_types):
6164 0329617a Guido Trotter
    """Return the new params dict for the given params.
6165 0329617a Guido Trotter

6166 0329617a Guido Trotter
    @type old_params: dict
6167 f2fd87d7 Iustin Pop
    @param old_params: old parameters
6168 0329617a Guido Trotter
    @type update_dict: dict
6169 f2fd87d7 Iustin Pop
    @param update_dict: dict containing new parameter values,
6170 f2fd87d7 Iustin Pop
                        or constants.VALUE_DEFAULT to reset the
6171 f2fd87d7 Iustin Pop
                        parameter to its default value
6172 0329617a Guido Trotter
    @type default_values: dict
6173 0329617a Guido Trotter
    @param default_values: default values for the filled parameters
6174 0329617a Guido Trotter
    @type parameter_types: dict
6175 0329617a Guido Trotter
    @param parameter_types: dict mapping target dict keys to types
6176 0329617a Guido Trotter
                            in constants.ENFORCEABLE_TYPES
6177 0329617a Guido Trotter
    @rtype: (dict, dict)
6178 0329617a Guido Trotter
    @return: (new_parameters, filled_parameters)
6179 0329617a Guido Trotter

6180 0329617a Guido Trotter
    """
6181 0329617a Guido Trotter
    params_copy = copy.deepcopy(old_params)
6182 0329617a Guido Trotter
    for key, val in update_dict.iteritems():
6183 0329617a Guido Trotter
      if val == constants.VALUE_DEFAULT:
6184 0329617a Guido Trotter
        try:
6185 0329617a Guido Trotter
          del params_copy[key]
6186 0329617a Guido Trotter
        except KeyError:
6187 0329617a Guido Trotter
          pass
6188 0329617a Guido Trotter
      else:
6189 0329617a Guido Trotter
        params_copy[key] = val
6190 0329617a Guido Trotter
    utils.ForceDictType(params_copy, parameter_types)
6191 0329617a Guido Trotter
    params_filled = objects.FillDict(default_values, params_copy)
6192 0329617a Guido Trotter
    return (params_copy, params_filled)
6193 0329617a Guido Trotter
6194 a8083063 Iustin Pop
  def CheckPrereq(self):
6195 a8083063 Iustin Pop
    """Check prerequisites.
6196 a8083063 Iustin Pop

6197 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
6198 a8083063 Iustin Pop

6199 a8083063 Iustin Pop
    """
6200 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
6201 a8083063 Iustin Pop
6202 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
6203 31a853d2 Iustin Pop
6204 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6205 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
6206 1a5c7281 Guido Trotter
    assert self.instance is not None, \
6207 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6208 6b12959c Iustin Pop
    pnode = instance.primary_node
6209 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
6210 74409b12 Iustin Pop
6211 338e51e8 Iustin Pop
    # hvparams processing
6212 74409b12 Iustin Pop
    if self.op.hvparams:
6213 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
6214 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
6215 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
6216 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
6217 74409b12 Iustin Pop
      # local check
6218 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
6219 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
6220 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
6221 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
6222 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
6223 338e51e8 Iustin Pop
    else:
6224 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
6225 338e51e8 Iustin Pop
6226 338e51e8 Iustin Pop
    # beparams processing
6227 338e51e8 Iustin Pop
    if self.op.beparams:
6228 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
6229 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
6230 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
6231 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
6232 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
6233 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
6234 338e51e8 Iustin Pop
    else:
6235 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
6236 74409b12 Iustin Pop
6237 cfefe007 Guido Trotter
    self.warn = []
6238 647a5d80 Iustin Pop
6239 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
6240 647a5d80 Iustin Pop
      mem_check_list = [pnode]
6241 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6242 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
6243 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
6244 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
6245 72737a7f Iustin Pop
                                                  instance.hypervisor)
6246 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
6247 72737a7f Iustin Pop
                                         instance.hypervisor)
6248 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
6249 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
6250 070e998b Iustin Pop
      if msg:
6251 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
6252 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
6253 070e998b Iustin Pop
                         (pnode, msg))
6254 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
6255 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
6256 070e998b Iustin Pop
                         " free memory information" % pnode)
6257 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
6258 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
6259 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
6260 cfefe007 Guido Trotter
      else:
6261 7ad1af4a Iustin Pop
        if instance_info.payload:
6262 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
6263 cfefe007 Guido Trotter
        else:
6264 cfefe007 Guido Trotter
          # Assume instance not running
6265 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
6266 cfefe007 Guido Trotter
          # and we have no other way to check)
6267 cfefe007 Guido Trotter
          current_mem = 0
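        # memory that would still be missing on the primary node after the
        # change: the increase (new setting minus current usage) minus what
        # the node currently has free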
6268 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
6269 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
6270 cfefe007 Guido Trotter
        if miss_mem > 0:
6271 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
6272 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
6273 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
6274 cfefe007 Guido Trotter
6275 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6276 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
6277 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
6278 ea33068f Iustin Pop
            continue
6279 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
6280 070e998b Iustin Pop
          if msg:
6281 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
6282 070e998b Iustin Pop
                             (node, msg))
6283 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
6284 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
6285 070e998b Iustin Pop
                             " memory information" % node)
6286 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
6287 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
6288 647a5d80 Iustin Pop
                             " secondary node %s" % node)
6289 5bc84f33 Alexander Schreiber
6290 24991749 Iustin Pop
    # NIC processing
6291 cd098c41 Guido Trotter
    self.nic_pnew = {}
6292 cd098c41 Guido Trotter
    self.nic_pinst = {}
6293 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6294 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6295 24991749 Iustin Pop
        if not instance.nics:
6296 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
6297 24991749 Iustin Pop
        continue
6298 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
6299 24991749 Iustin Pop
        # an existing nic
6300 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
6301 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
6302 24991749 Iustin Pop
                                     " are 0 to %d" %
6303 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
6304 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
6305 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
6306 cd098c41 Guido Trotter
      else:
6307 cd098c41 Guido Trotter
        old_nic_params = {}
6308 cd098c41 Guido Trotter
        old_nic_ip = None
6309 cd098c41 Guido Trotter
6310 cd098c41 Guido Trotter
      update_params_dict = dict([(key, nic_dict[key])
6311 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
6312 cd098c41 Guido Trotter
                                 if key in nic_dict])
6313 cd098c41 Guido Trotter
6314 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
6315 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
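        # the legacy 'bridge' argument maps onto the 'link' nicparam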
6316 cd098c41 Guido Trotter
6317 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
6318 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
6319 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
6320 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
6321 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
6322 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
6323 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
6324 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
6325 cd098c41 Guido Trotter
6326 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
6327 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
6328 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
6329 35c0c8da Iustin Pop
        if msg:
6330 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
6331 24991749 Iustin Pop
          if self.force:
6332 24991749 Iustin Pop
            self.warn.append(msg)
6333 24991749 Iustin Pop
          else:
6334 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
6335 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
6336 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
6337 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
6338 cd098c41 Guido Trotter
        else:
6339 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
6340 cd098c41 Guido Trotter
        if nic_ip is None:
6341 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
6342 cd098c41 Guido Trotter
                                     ' on a routed nic')
6343 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6344 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6345 5c44da6a Guido Trotter
        if nic_mac is None:
6346 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic mac to None')
6347 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6348 5c44da6a Guido Trotter
          # otherwise generate the mac
6349 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
6350 5c44da6a Guido Trotter
        else:
6351 5c44da6a Guido Trotter
          # or validate/reserve the current one
6352 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
6353 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
6354 5c44da6a Guido Trotter
                                       " in cluster" % nic_mac)
6355 24991749 Iustin Pop
6356 24991749 Iustin Pop
    # DISK processing
6357 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
6358 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
6359 24991749 Iustin Pop
                                 " diskless instances")
6360 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6361 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6362 24991749 Iustin Pop
        if len(instance.disks) == 1:
6363 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
6364 24991749 Iustin Pop
                                     " an instance")
6365 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
6366 24991749 Iustin Pop
        ins_l = ins_l[pnode]
6367 4c4e4e1e Iustin Pop
        msg = ins_l.fail_msg
6368 aca13712 Iustin Pop
        if msg:
6369 aca13712 Iustin Pop
          raise errors.OpPrereqError("Can't contact node %s: %s" %
6370 aca13712 Iustin Pop
                                     (pnode, msg))
6371 aca13712 Iustin Pop
        if instance.name in ins_l.payload:
6372 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
6373 24991749 Iustin Pop
                                     " disks.")
6374 24991749 Iustin Pop
6375 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
6376 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
6377 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
6378 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
6379 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
6380 24991749 Iustin Pop
        # an existing disk
6381 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
6382 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
6383 24991749 Iustin Pop
                                     " are 0 to %d" %
6384 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
6385 24991749 Iustin Pop
6386 a8083063 Iustin Pop
    return
6387 a8083063 Iustin Pop
6388 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6389 a8083063 Iustin Pop
    """Modifies an instance.
6390 a8083063 Iustin Pop

6391 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
6392 24991749 Iustin Pop

6393 a8083063 Iustin Pop
    """
6394 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
6395 cfefe007 Guido Trotter
    # feedback_fn there.
6396 cfefe007 Guido Trotter
    for warn in self.warn:
6397 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
6398 cfefe007 Guido Trotter
6399 a8083063 Iustin Pop
    result = []
6400 a8083063 Iustin Pop
    instance = self.instance
6401 cd098c41 Guido Trotter
    cluster = self.cluster
6402 24991749 Iustin Pop
    # disk changes
6403 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6404 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6405 24991749 Iustin Pop
        # remove the last disk
6406 24991749 Iustin Pop
        device = instance.disks.pop()
6407 24991749 Iustin Pop
        device_idx = len(instance.disks)
6408 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
6409 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
6410 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
6411 e1bc0878 Iustin Pop
          if msg:
6412 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
6413 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
6414 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
6415 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
6416 24991749 Iustin Pop
        # add a new disk
6417 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
6418 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
6419 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
6420 24991749 Iustin Pop
        else:
6421 24991749 Iustin Pop
          file_driver = file_path = None
6422 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
6423 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
6424 24991749 Iustin Pop
                                         instance.disk_template,
6425 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
6426 24991749 Iustin Pop
                                         instance.secondary_nodes,
6427 24991749 Iustin Pop
                                         [disk_dict],
6428 24991749 Iustin Pop
                                         file_path,
6429 24991749 Iustin Pop
                                         file_driver,
6430 24991749 Iustin Pop
                                         disk_idx_base)[0]
6431 24991749 Iustin Pop
        instance.disks.append(new_disk)
6432 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
6433 24991749 Iustin Pop
6434 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
6435 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
6436 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
6437 24991749 Iustin Pop
        #HARDCODE
6438 428958aa Iustin Pop
        for node in instance.all_nodes:
6439 428958aa Iustin Pop
          f_create = node == instance.primary_node
6440 796cab27 Iustin Pop
          try:
6441 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
6442 428958aa Iustin Pop
                            f_create, info, f_create)
6443 1492cca7 Iustin Pop
          except errors.OpExecError, err:
6444 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
6445 428958aa Iustin Pop
                            " node %s: %s",
6446 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
6447 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
6448 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
6449 24991749 Iustin Pop
      else:
6450 24991749 Iustin Pop
        # change a given disk
6451 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
6452 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
6453 24991749 Iustin Pop
    # NIC changes
6454 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6455 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6456 24991749 Iustin Pop
        # remove the last nic
6457 24991749 Iustin Pop
        del instance.nics[-1]
6458 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
6459 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6460 5c44da6a Guido Trotter
        # mac and bridge should be set by now
6461 5c44da6a Guido Trotter
        mac = nic_dict['mac']
6462 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
6463 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
6464 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
6465 24991749 Iustin Pop
        instance.nics.append(new_nic)
6466 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
6467 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
6468 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
6469 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
6470 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
6471 cd098c41 Guido Trotter
                       )))
6472 24991749 Iustin Pop
      else:
6473 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
6474 24991749 Iustin Pop
          if key in nic_dict:
6475 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
6476 cd098c41 Guido Trotter
        if nic_op in self.nic_pnew:
6477 cd098c41 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
6478 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
6479 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
6480 24991749 Iustin Pop
6481 24991749 Iustin Pop
    # hvparams changes
6482 74409b12 Iustin Pop
    if self.op.hvparams:
6483 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
6484 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
6485 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
6486 24991749 Iustin Pop
6487 24991749 Iustin Pop
    # beparams changes
6488 338e51e8 Iustin Pop
    if self.op.beparams:
6489 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
6490 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
6491 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
6492 a8083063 Iustin Pop
6493 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
6494 a8083063 Iustin Pop
6495 a8083063 Iustin Pop
    return result
6496 a8083063 Iustin Pop
6497 a8083063 Iustin Pop
6498 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
6499 a8083063 Iustin Pop
  """Query the exports list
6500 a8083063 Iustin Pop

6501 a8083063 Iustin Pop
  """
6502 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
6503 21a15682 Guido Trotter
  REQ_BGL = False
6504 21a15682 Guido Trotter
6505 21a15682 Guido Trotter
  def ExpandNames(self):
6506 21a15682 Guido Trotter
    self.needed_locks = {}
6507 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
6508 21a15682 Guido Trotter
    if not self.op.nodes:
6509 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6510 21a15682 Guido Trotter
    else:
6511 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
6512 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
6513 a8083063 Iustin Pop
6514 a8083063 Iustin Pop
  def CheckPrereq(self):
6515 21a15682 Guido Trotter
    """Check prerequisites.
6516 a8083063 Iustin Pop

6517 a8083063 Iustin Pop
    """
6518 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
6519 a8083063 Iustin Pop
6520 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6521 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
6522 a8083063 Iustin Pop

6523 e4376078 Iustin Pop
    @rtype: dict
6524 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
6525 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
6526 e4376078 Iustin Pop
        that node.
6527 a8083063 Iustin Pop

6528 a8083063 Iustin Pop
    """
6529 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
6530 b04285f2 Guido Trotter
    result = {}
6531 b04285f2 Guido Trotter
    for node in rpcresult:
6532 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
6533 b04285f2 Guido Trotter
        result[node] = False
6534 b04285f2 Guido Trotter
      else:
6535 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
6536 b04285f2 Guido Trotter
6537 b04285f2 Guido Trotter
    return result
6538 a8083063 Iustin Pop
6539 a8083063 Iustin Pop
6540 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
6541 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
6542 a8083063 Iustin Pop

6543 a8083063 Iustin Pop
  """
6544 a8083063 Iustin Pop
  HPATH = "instance-export"
6545 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6546 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
6547 6657590e Guido Trotter
  REQ_BGL = False
6548 6657590e Guido Trotter
6549 6657590e Guido Trotter
  def ExpandNames(self):
6550 6657590e Guido Trotter
    self._ExpandAndLockInstance()
6551 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
6552 6657590e Guido Trotter
    #
6553 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
6554 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
6555 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
6556 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
6557 6657590e Guido Trotter
    #    then one to remove, after
6558 5bbd3f7f Michael Hanselmann
    #  - removing the removal operation altogether
6559 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6560 6657590e Guido Trotter
6561 6657590e Guido Trotter
  def DeclareLocks(self, level):
6562 6657590e Guido Trotter
    """Last minute lock declaration."""
6563 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
6564 a8083063 Iustin Pop
6565 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6566 a8083063 Iustin Pop
    """Build hooks env.
6567 a8083063 Iustin Pop

6568 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
6569 a8083063 Iustin Pop

6570 a8083063 Iustin Pop
    """
6571 a8083063 Iustin Pop
    env = {
6572 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
6573 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
6574 a8083063 Iustin Pop
      }
6575 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6576 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
6577 a8083063 Iustin Pop
          self.op.target_node]
6578 a8083063 Iustin Pop
    return env, nl, nl
6579 a8083063 Iustin Pop
6580 a8083063 Iustin Pop
  def CheckPrereq(self):
6581 a8083063 Iustin Pop
    """Check prerequisites.
6582 a8083063 Iustin Pop

6583 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
6584 a8083063 Iustin Pop

6585 a8083063 Iustin Pop
    """
6586 6657590e Guido Trotter
    instance_name = self.op.instance_name
6587 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
6588 6657590e Guido Trotter
    assert self.instance is not None, \
6589 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
6590 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
6591 a8083063 Iustin Pop
6592 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
6593 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
6594 a8083063 Iustin Pop
6595 268b8e42 Iustin Pop
    if self.dst_node is None:
6596 268b8e42 Iustin Pop
      # This is wrong node name, not a non-locked node
6597 268b8e42 Iustin Pop
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
6598 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
6599 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
6600 a8083063 Iustin Pop
6601 b6023d6c Manuel Franceschini
    # instance disk type verification
6602 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
6603 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
6604 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
6605 b6023d6c Manuel Franceschini
                                   " file-based disks")
6606 b6023d6c Manuel Franceschini
6607 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6608 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
6609 a8083063 Iustin Pop

6610 a8083063 Iustin Pop
    """
6611 a8083063 Iustin Pop
    instance = self.instance
6612 a8083063 Iustin Pop
    dst_node = self.dst_node
6613 a8083063 Iustin Pop
    src_node = instance.primary_node
6614 a8083063 Iustin Pop
    if self.op.shutdown:
6615 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
6616 781de953 Iustin Pop
      result = self.rpc.call_instance_shutdown(src_node, instance)
6617 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
6618 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
6619 a8083063 Iustin Pop
6620 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
6621 a8083063 Iustin Pop
6622 a8083063 Iustin Pop
    snap_disks = []
6623 a8083063 Iustin Pop
6624 998c712c Iustin Pop
    # set the disk IDs correctly since call_instance_start needs the
6625 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
6626 998c712c Iustin Pop
    for disk in instance.disks:
6627 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
6628 998c712c Iustin Pop
6629 a8083063 Iustin Pop
    try:
6630 a97da6b7 Iustin Pop
      for idx, disk in enumerate(instance.disks):
6631 87812fd3 Iustin Pop
        # result.payload will be a snapshot of an lvm leaf of the disk we passed
6632 87812fd3 Iustin Pop
        result = self.rpc.call_blockdev_snapshot(src_node, disk)
6633 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6634 87812fd3 Iustin Pop
        if msg:
6635 af0413bb Guido Trotter
          self.LogWarning("Could not snapshot disk/%s on node %s: %s",
6636 af0413bb Guido Trotter
                          idx, src_node, msg)
6637 19d7f90a Guido Trotter
          snap_disks.append(False)
6638 19d7f90a Guido Trotter
        else:
6639 87812fd3 Iustin Pop
          disk_id = (vgname, result.payload)
6640 19d7f90a Guido Trotter
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
6641 87812fd3 Iustin Pop
                                 logical_id=disk_id, physical_id=disk_id,
6642 19d7f90a Guido Trotter
                                 iv_name=disk.iv_name)
6643 19d7f90a Guido Trotter
          snap_disks.append(new_dev)
6644 a8083063 Iustin Pop
6645 a8083063 Iustin Pop
    finally:
6646 0d68c45d Iustin Pop
      if self.op.shutdown and instance.admin_up:
6647 0eca8e0c Iustin Pop
        result = self.rpc.call_instance_start(src_node, instance, None, None)
6648 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6649 dd279568 Iustin Pop
        if msg:
6650 b9bddb6b Iustin Pop
          _ShutdownInstanceDisks(self, instance)
6651 dd279568 Iustin Pop
          raise errors.OpExecError("Could not start instance: %s" % msg)
6652 a8083063 Iustin Pop
6653 a8083063 Iustin Pop
    # TODO: check for size
6654 a8083063 Iustin Pop
6655 62c9ec92 Iustin Pop
    cluster_name = self.cfg.GetClusterName()
6656 74c47259 Iustin Pop
    for idx, dev in enumerate(snap_disks):
6657 19d7f90a Guido Trotter
      if dev:
6658 781de953 Iustin Pop
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
6659 781de953 Iustin Pop
                                               instance, cluster_name, idx)
6660 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6661 ba55d062 Iustin Pop
        if msg:
6662 af0413bb Guido Trotter
          self.LogWarning("Could not export disk/%s from node %s to"
6663 af0413bb Guido Trotter
                          " node %s: %s", idx, src_node, dst_node.name, msg)
6664 4c4e4e1e Iustin Pop
        msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
6665 e1bc0878 Iustin Pop
        if msg:
6666 a97da6b7 Iustin Pop
          self.LogWarning("Could not remove snapshot for disk/%d from node"
6667 a97da6b7 Iustin Pop
                          " %s: %s", idx, src_node, msg)
6668 a8083063 Iustin Pop
6669 781de953 Iustin Pop
    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
6670 4c4e4e1e Iustin Pop
    msg = result.fail_msg
6671 9b201a0d Iustin Pop
    if msg:
6672 9b201a0d Iustin Pop
      self.LogWarning("Could not finalize export for instance %s"
6673 9b201a0d Iustin Pop
                      " on node %s: %s", instance.name, dst_node.name, msg)
6674 a8083063 Iustin Pop
6675 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
6676 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
6677 a8083063 Iustin Pop
6678 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
6679 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
6680 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
6681 35fbcd11 Iustin Pop
    iname = instance.name
6682 a8083063 Iustin Pop
    if nodelist:
6683 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
6684 a8083063 Iustin Pop
      for node in exportlist:
6685 4c4e4e1e Iustin Pop
        if exportlist[node].fail_msg:
6686 781de953 Iustin Pop
          continue
6687 35fbcd11 Iustin Pop
        if iname in exportlist[node].payload:
6688 4c4e4e1e Iustin Pop
          msg = self.rpc.call_export_remove(node, iname).fail_msg
6689 35fbcd11 Iustin Pop
          if msg:
6690 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
6691 35fbcd11 Iustin Pop
                            " on node %s: %s", iname, node, msg)
6692 5c947f38 Iustin Pop
6693 5c947f38 Iustin Pop
6694 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
6695 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
6696 9ac99fda Guido Trotter

6697 9ac99fda Guido Trotter
  """
6698 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
6699 3656b3af Guido Trotter
  REQ_BGL = False
6700 3656b3af Guido Trotter
6701 3656b3af Guido Trotter
  def ExpandNames(self):
6702 3656b3af Guido Trotter
    self.needed_locks = {}
6703 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
6704 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
6705 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
6706 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6707 9ac99fda Guido Trotter
6708 9ac99fda Guido Trotter
  def CheckPrereq(self):
6709 9ac99fda Guido Trotter
    """Check prerequisites.
6710 9ac99fda Guido Trotter
    """
6711 9ac99fda Guido Trotter
    pass
6712 9ac99fda Guido Trotter
6713 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
6714 9ac99fda Guido Trotter
    """Remove any export.
6715 9ac99fda Guido Trotter

6716 9ac99fda Guido Trotter
    """
6717 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
6718 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
6719 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
6720 9ac99fda Guido Trotter
    fqdn_warn = False
6721 9ac99fda Guido Trotter
    if not instance_name:
6722 9ac99fda Guido Trotter
      fqdn_warn = True
6723 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
6724 9ac99fda Guido Trotter
6725 1b7bfbb7 Iustin Pop
    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
6726 1b7bfbb7 Iustin Pop
    exportlist = self.rpc.call_export_list(locked_nodes)
6727 9ac99fda Guido Trotter
    found = False
6728 9ac99fda Guido Trotter
    for node in exportlist:
6729 4c4e4e1e Iustin Pop
      msg = exportlist[node].fail_msg
6730 1b7bfbb7 Iustin Pop
      if msg:
6731 1b7bfbb7 Iustin Pop
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
6732 781de953 Iustin Pop
        continue
6733 1b7bfbb7 Iustin Pop
      if instance_name in exportlist[node].payload:
6734 9ac99fda Guido Trotter
        found = True
6735 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
6736 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6737 35fbcd11 Iustin Pop
        if msg:
6738 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
6739 35fbcd11 Iustin Pop
                        " on node %s: %s", instance_name, node, msg)
6740 9ac99fda Guido Trotter
6741 9ac99fda Guido Trotter
    if fqdn_warn and not found:
6742 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
6743 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
6744 9ac99fda Guido Trotter
                  " Domain Name.")
6745 9ac99fda Guido Trotter
6746 9ac99fda Guido Trotter
6747 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
6748 5c947f38 Iustin Pop
  """Generic tags LU.
6749 5c947f38 Iustin Pop

6750 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
6751 5c947f38 Iustin Pop

6752 5c947f38 Iustin Pop
  """
6753 5c947f38 Iustin Pop
6754 8646adce Guido Trotter
  def ExpandNames(self):
6755 8646adce Guido Trotter
    self.needed_locks = {}
6756 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
6757 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
6758 5c947f38 Iustin Pop
      if name is None:
6759 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
6760 3ecf6786 Iustin Pop
                                   (self.op.name,))
6761 5c947f38 Iustin Pop
      self.op.name = name
6762 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = name
6763 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
6764 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
6765 5c947f38 Iustin Pop
      if name is None:
6766 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
6767 3ecf6786 Iustin Pop
                                   (self.op.name,))
6768 5c947f38 Iustin Pop
      self.op.name = name
6769 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = name
6770 8646adce Guido Trotter
6771 8646adce Guido Trotter
  def CheckPrereq(self):
6772 8646adce Guido Trotter
    """Check prerequisites.
6773 8646adce Guido Trotter

6774 8646adce Guido Trotter
    """
6775 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
6776 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
6777 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
6778 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
6779 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
6780 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
6781 5c947f38 Iustin Pop
    else:
6782 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
6783 3ecf6786 Iustin Pop
                                 str(self.op.kind))
6784 5c947f38 Iustin Pop
6785 5c947f38 Iustin Pop
6786 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
6787 5c947f38 Iustin Pop
  """Returns the tags of a given object.
6788 5c947f38 Iustin Pop

6789 5c947f38 Iustin Pop
  """
6790 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
6791 8646adce Guido Trotter
  REQ_BGL = False
6792 5c947f38 Iustin Pop
6793 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6794 5c947f38 Iustin Pop
    """Returns the tag list.
6795 5c947f38 Iustin Pop

6796 5c947f38 Iustin Pop
    """
6797 5d414478 Oleksiy Mishchenko
    return list(self.target.GetTags())
6798 5c947f38 Iustin Pop
6799 5c947f38 Iustin Pop
6800 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6801 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6802 73415719 Iustin Pop

6803 73415719 Iustin Pop
  """
6804 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6805 8646adce Guido Trotter
  REQ_BGL = False
6806 8646adce Guido Trotter
6807 8646adce Guido Trotter
  def ExpandNames(self):
6808 8646adce Guido Trotter
    self.needed_locks = {}
6809 73415719 Iustin Pop
6810 73415719 Iustin Pop
  def CheckPrereq(self):
6811 73415719 Iustin Pop
    """Check prerequisites.
6812 73415719 Iustin Pop

6813 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6814 73415719 Iustin Pop

6815 73415719 Iustin Pop
    """
6816 73415719 Iustin Pop
    try:
6817 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6818 73415719 Iustin Pop
    except re.error, err:
6819 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6820 73415719 Iustin Pop
                                 (self.op.pattern, err))
6821 73415719 Iustin Pop
6822 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6823 73415719 Iustin Pop
    """Returns the tag list.
6824 73415719 Iustin Pop

6825 73415719 Iustin Pop
    """
6826 73415719 Iustin Pop
    cfg = self.cfg
6827 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6828 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6829 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6830 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6831 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6832 73415719 Iustin Pop
    results = []
6833 73415719 Iustin Pop
    for path, target in tgts:
6834 73415719 Iustin Pop
      for tag in target.GetTags():
6835 73415719 Iustin Pop
        if self.re.search(tag):
6836 73415719 Iustin Pop
          results.append((path, tag))
6837 73415719 Iustin Pop
    return results
6838 73415719 Iustin Pop
6839 73415719 Iustin Pop
6840 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6841 5c947f38 Iustin Pop
  """Sets a tag on a given object.
6842 5c947f38 Iustin Pop

6843 5c947f38 Iustin Pop
  """
6844 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6845 8646adce Guido Trotter
  REQ_BGL = False
6846 5c947f38 Iustin Pop
6847 5c947f38 Iustin Pop
  def CheckPrereq(self):
6848 5c947f38 Iustin Pop
    """Check prerequisites.
6849 5c947f38 Iustin Pop

6850 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
6851 5c947f38 Iustin Pop

6852 5c947f38 Iustin Pop
    """
6853 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6854 f27302fa Iustin Pop
    for tag in self.op.tags:
6855 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6856 5c947f38 Iustin Pop
6857 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6858 5c947f38 Iustin Pop
    """Sets the tag.
6859 5c947f38 Iustin Pop

6860 5c947f38 Iustin Pop
    """
6861 5c947f38 Iustin Pop
    try:
6862 f27302fa Iustin Pop
      for tag in self.op.tags:
6863 f27302fa Iustin Pop
        self.target.AddTag(tag)
6864 5c947f38 Iustin Pop
    except errors.TagError, err:
6865 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6866 5c947f38 Iustin Pop
    try:
6867 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6868 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6869 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6870 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6871 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6872 5c947f38 Iustin Pop
6873 5c947f38 Iustin Pop
6874 f27302fa Iustin Pop
class LUDelTags(TagsLU):
6875 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
6876 5c947f38 Iustin Pop

6877 5c947f38 Iustin Pop
  """
6878 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6879 8646adce Guido Trotter
  REQ_BGL = False
6880 5c947f38 Iustin Pop
6881 5c947f38 Iustin Pop
  def CheckPrereq(self):
6882 5c947f38 Iustin Pop
    """Check prerequisites.
6883 5c947f38 Iustin Pop

6884 5c947f38 Iustin Pop
    This checks that we have the given tag.
6885 5c947f38 Iustin Pop

6886 5c947f38 Iustin Pop
    """
6887 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6888 f27302fa Iustin Pop
    for tag in self.op.tags:
6889 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6890 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
6891 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
6892 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
6893 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
6894 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
6895 f27302fa Iustin Pop
      diff_names.sort()
6896 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
6897 f27302fa Iustin Pop
                                 (",".join(diff_names)))
6898 5c947f38 Iustin Pop
6899 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6900 5c947f38 Iustin Pop
    """Remove the tag from the object.
6901 5c947f38 Iustin Pop

6902 5c947f38 Iustin Pop
    """
6903 f27302fa Iustin Pop
    for tag in self.op.tags:
6904 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
6905 5c947f38 Iustin Pop
    try:
6906 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6907 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6908 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6909 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6910 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6911 06009e27 Iustin Pop
6912 0eed6e61 Guido Trotter
6913 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
6914 06009e27 Iustin Pop
  """Sleep for a specified amount of time.
6915 06009e27 Iustin Pop

6916 0b097284 Guido Trotter
  This LU sleeps on the master and/or nodes for a specified amount of
6917 06009e27 Iustin Pop
  time.
6918 06009e27 Iustin Pop

6919 06009e27 Iustin Pop
  """
6920 06009e27 Iustin Pop
  _OP_REQP = ["duration", "on_master", "on_nodes"]
6921 fbe9022f Guido Trotter
  REQ_BGL = False
6922 06009e27 Iustin Pop
6923 fbe9022f Guido Trotter
  def ExpandNames(self):
6924 fbe9022f Guido Trotter
    """Expand names and set required locks.
6925 06009e27 Iustin Pop

6926 fbe9022f Guido Trotter
    This expands the node list, if any.
6927 06009e27 Iustin Pop

6928 06009e27 Iustin Pop
    """
6929 fbe9022f Guido Trotter
    self.needed_locks = {}
6930 06009e27 Iustin Pop
    if self.op.on_nodes:
6931 fbe9022f Guido Trotter
      # _GetWantedNodes can be used here, but is not always appropriate to use
6932 fbe9022f Guido Trotter
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
6933 fbe9022f Guido Trotter
      # more information.
6934 06009e27 Iustin Pop
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
6935 fbe9022f Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes
6936 fbe9022f Guido Trotter
6937 fbe9022f Guido Trotter
  def CheckPrereq(self):
6938 fbe9022f Guido Trotter
    """Check prerequisites.
6939 fbe9022f Guido Trotter

6940 fbe9022f Guido Trotter
    """
6941 06009e27 Iustin Pop
6942 06009e27 Iustin Pop
  def Exec(self, feedback_fn):
6943 06009e27 Iustin Pop
    """Do the actual sleep.
6944 06009e27 Iustin Pop

6945 06009e27 Iustin Pop
    """
6946 06009e27 Iustin Pop
    if self.op.on_master:
6947 06009e27 Iustin Pop
      if not utils.TestDelay(self.op.duration):
6948 06009e27 Iustin Pop
        raise errors.OpExecError("Error during master delay test")
6949 06009e27 Iustin Pop
    if self.op.on_nodes:
6950 72737a7f Iustin Pop
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
6951 06009e27 Iustin Pop
      for node, node_result in result.items():
6952 4c4e4e1e Iustin Pop
        node_result.Raise("Failure during rpc call to node %s" % node)
6953 d61df03e Iustin Pop
6954 d61df03e Iustin Pop
6955 d1c2dd75 Iustin Pop
class IAllocator(object):
6956 d1c2dd75 Iustin Pop
  """IAllocator framework.
6957 d61df03e Iustin Pop

6958 d1c2dd75 Iustin Pop
  An IAllocator instance has four sets of attributes:
6959 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
6960 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
6961 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text) that represent the
6962 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
6963 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
6964 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
6965 d1c2dd75 Iustin Pop
      easy usage
6966 d61df03e Iustin Pop

6967 d61df03e Iustin Pop
  """
6968 29859cb7 Iustin Pop
  _ALLO_KEYS = [
6969 d1c2dd75 Iustin Pop
    "mem_size", "disks", "disk_template",
6970 8cc7e742 Guido Trotter
    "os", "tags", "nics", "vcpus", "hypervisor",
6971 d1c2dd75 Iustin Pop
    ]
6972 29859cb7 Iustin Pop
  _RELO_KEYS = [
6973 29859cb7 Iustin Pop
    "relocate_from",
6974 29859cb7 Iustin Pop
    ]
6975 d1c2dd75 Iustin Pop
6976 923ddac0 Michael Hanselmann
  def __init__(self, cfg, rpc, mode, name, **kwargs):
6977 923ddac0 Michael Hanselmann
    self.cfg = cfg
6978 923ddac0 Michael Hanselmann
    self.rpc = rpc
6979 d1c2dd75 Iustin Pop
    # init buffer variables
6980 d1c2dd75 Iustin Pop
    self.in_text = self.out_text = self.in_data = self.out_data = None
6981 d1c2dd75 Iustin Pop
    # init all input fields so that pylint is happy
6982 29859cb7 Iustin Pop
    self.mode = mode
6983 29859cb7 Iustin Pop
    self.name = name
6984 d1c2dd75 Iustin Pop
    self.mem_size = self.disks = self.disk_template = None
6985 d1c2dd75 Iustin Pop
    self.os = self.tags = self.nics = self.vcpus = None
6986 a0add446 Iustin Pop
    self.hypervisor = None
6987 29859cb7 Iustin Pop
    self.relocate_from = None
6988 27579978 Iustin Pop
    # computed fields
6989 27579978 Iustin Pop
    self.required_nodes = None
6990 d1c2dd75 Iustin Pop
    # init result fields
6991 d1c2dd75 Iustin Pop
    self.success = self.info = self.nodes = None
6992 29859cb7 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6993 29859cb7 Iustin Pop
      keyset = self._ALLO_KEYS
6994 29859cb7 Iustin Pop
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6995 29859cb7 Iustin Pop
      keyset = self._RELO_KEYS
6996 29859cb7 Iustin Pop
    else:
6997 29859cb7 Iustin Pop
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
6998 29859cb7 Iustin Pop
                                   " IAllocator" % self.mode)
6999 d1c2dd75 Iustin Pop
    for key in kwargs:
7000 29859cb7 Iustin Pop
      if key not in keyset:
7001 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
7002 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
7003 d1c2dd75 Iustin Pop
      setattr(self, key, kwargs[key])
7004 29859cb7 Iustin Pop
    for key in keyset:
7005 d1c2dd75 Iustin Pop
      if key not in kwargs:
7006 d1c2dd75 Iustin Pop
        raise errors.ProgrammerError("Missing input parameter '%s' to"
7007 d1c2dd75 Iustin Pop
                                     " IAllocator" % key)
7008 d1c2dd75 Iustin Pop
    self._BuildInputData()
7009 d1c2dd75 Iustin Pop
7010 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
7011 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
7012 d1c2dd75 Iustin Pop

7013 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
7014 d1c2dd75 Iustin Pop

7015 d1c2dd75 Iustin Pop
    """
7016 923ddac0 Michael Hanselmann
    cfg = self.cfg
7017 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
7018 d1c2dd75 Iustin Pop
    # cluster data
7019 d1c2dd75 Iustin Pop
    data = {
7020 77031881 Iustin Pop
      "version": constants.IALLOCATOR_VERSION,
7021 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
7022 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
7023 1325da74 Iustin Pop
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
7024 d1c2dd75 Iustin Pop
      # we don't have job IDs
7025 d61df03e Iustin Pop
      }
7026 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
7027 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
7028 6286519f Iustin Pop
7029 d1c2dd75 Iustin Pop
    # node data
7030 d1c2dd75 Iustin Pop
    node_results = {}
7031 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
7032 8cc7e742 Guido Trotter
7033 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
7034 a0add446 Iustin Pop
      hypervisor_name = self.hypervisor
7035 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
7036 a0add446 Iustin Pop
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
7037 8cc7e742 Guido Trotter
7038 923ddac0 Michael Hanselmann
    node_data = self.rpc.call_node_info(node_list, cfg.GetVGName(),
7039 923ddac0 Michael Hanselmann
                                        hypervisor_name)
7040 923ddac0 Michael Hanselmann
    node_iinfo = \
7041 923ddac0 Michael Hanselmann
      self.rpc.call_all_instances_info(node_list,
7042 923ddac0 Michael Hanselmann
                                       cluster_info.enabled_hypervisors)
7043 1325da74 Iustin Pop
    for nname, nresult in node_data.items():
7044 1325da74 Iustin Pop
      # first fill in static (config-based) values
7045 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
7046 d1c2dd75 Iustin Pop
      pnr = {
7047 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
7048 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
7049 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
7050 fc0fe88c Iustin Pop
        "offline": ninfo.offline,
7051 0b2454b9 Iustin Pop
        "drained": ninfo.drained,
7052 1325da74 Iustin Pop
        "master_candidate": ninfo.master_candidate,
7053 d1c2dd75 Iustin Pop
        }
7054 1325da74 Iustin Pop
7055 1325da74 Iustin Pop
      if not ninfo.offline:
7056 4c4e4e1e Iustin Pop
        nresult.Raise("Can't get data for node %s" % nname)
7057 4c4e4e1e Iustin Pop
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
7058 4c4e4e1e Iustin Pop
                                nname)
7059 070e998b Iustin Pop
        remote_info = nresult.payload
7060 1325da74 Iustin Pop
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
7061 1325da74 Iustin Pop
                     'vg_size', 'vg_free', 'cpu_total']:
7062 1325da74 Iustin Pop
          if attr not in remote_info:
7063 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' didn't return attribute"
7064 1325da74 Iustin Pop
                                     " '%s'" % (nname, attr))
7065 070e998b Iustin Pop
          if not isinstance(remote_info[attr], int):
7066 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' returned invalid value"
7067 070e998b Iustin Pop
                                     " for '%s': %s" %
7068 070e998b Iustin Pop
                                     (nname, attr, remote_info[attr]))
7069 1325da74 Iustin Pop
        # compute memory used by primary instances
7070 1325da74 Iustin Pop
        i_p_mem = i_p_up_mem = 0
7071 1325da74 Iustin Pop
        for iinfo, beinfo in i_list:
7072 1325da74 Iustin Pop
          if iinfo.primary_node == nname:
7073 1325da74 Iustin Pop
            i_p_mem += beinfo[constants.BE_MEMORY]
7074 2fa74ef4 Iustin Pop
            if iinfo.name not in node_iinfo[nname].payload:
7075 1325da74 Iustin Pop
              i_used_mem = 0
7076 1325da74 Iustin Pop
            else:
7077 2fa74ef4 Iustin Pop
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
7078 1325da74 Iustin Pop
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
7079 1325da74 Iustin Pop
            remote_info['memory_free'] -= max(0, i_mem_diff)
7080 1325da74 Iustin Pop
7081 1325da74 Iustin Pop
            if iinfo.admin_up:
7082 1325da74 Iustin Pop
              i_p_up_mem += beinfo[constants.BE_MEMORY]
7083 1325da74 Iustin Pop
7084 1325da74 Iustin Pop
        # compute memory used by instances
7085 1325da74 Iustin Pop
        pnr_dyn = {
7086 1325da74 Iustin Pop
          "total_memory": remote_info['memory_total'],
7087 1325da74 Iustin Pop
          "reserved_memory": remote_info['memory_dom0'],
7088 1325da74 Iustin Pop
          "free_memory": remote_info['memory_free'],
7089 1325da74 Iustin Pop
          "total_disk": remote_info['vg_size'],
7090 1325da74 Iustin Pop
          "free_disk": remote_info['vg_free'],
7091 1325da74 Iustin Pop
          "total_cpus": remote_info['cpu_total'],
7092 1325da74 Iustin Pop
          "i_pri_memory": i_p_mem,
7093 1325da74 Iustin Pop
          "i_pri_up_memory": i_p_up_mem,
7094 1325da74 Iustin Pop
          }
7095 1325da74 Iustin Pop
        pnr.update(pnr_dyn)
7096 1325da74 Iustin Pop
7097 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
7098 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
7099 d1c2dd75 Iustin Pop
7100 d1c2dd75 Iustin Pop
    # instance data
7101 d1c2dd75 Iustin Pop
    instance_data = {}
7102 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
7103 a9fe7e8f Guido Trotter
      nic_data = []
7104 a9fe7e8f Guido Trotter
      for nic in iinfo.nics:
7105 a9fe7e8f Guido Trotter
        filled_params = objects.FillDict(
7106 a9fe7e8f Guido Trotter
            cluster_info.nicparams[constants.PP_DEFAULT],
7107 a9fe7e8f Guido Trotter
            nic.nicparams)
7108 a9fe7e8f Guido Trotter
        nic_dict = {"mac": nic.mac,
7109 a9fe7e8f Guido Trotter
                    "ip": nic.ip,
7110 a9fe7e8f Guido Trotter
                    "mode": filled_params[constants.NIC_MODE],
7111 a9fe7e8f Guido Trotter
                    "link": filled_params[constants.NIC_LINK],
7112 a9fe7e8f Guido Trotter
                   }
7113 a9fe7e8f Guido Trotter
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
7114 a9fe7e8f Guido Trotter
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
7115 a9fe7e8f Guido Trotter
        nic_data.append(nic_dict)
7116 d1c2dd75 Iustin Pop
      pir = {
7117 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
7118 1325da74 Iustin Pop
        "admin_up": iinfo.admin_up,
7119 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
7120 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
7121 d1c2dd75 Iustin Pop
        "os": iinfo.os,
7122 1325da74 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
7123 d1c2dd75 Iustin Pop
        "nics": nic_data,
7124 1325da74 Iustin Pop
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
7125 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
7126 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
7127 d1c2dd75 Iustin Pop
        }
7128 88ae4f85 Iustin Pop
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
7129 88ae4f85 Iustin Pop
                                                 pir["disks"])
7130 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
7131 d61df03e Iustin Pop
7132 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
7133 d61df03e Iustin Pop
7134 d1c2dd75 Iustin Pop
    self.in_data = data
7135 d61df03e Iustin Pop
7136 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
7137 d1c2dd75 Iustin Pop
    """Add new instance data to allocator structure.
7138 d61df03e Iustin Pop

7139 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
7140 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
7141 d61df03e Iustin Pop

7142 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
7143 d1c2dd75 Iustin Pop
    done.
7144 d61df03e Iustin Pop

7145 d1c2dd75 Iustin Pop
    """
7146 d1c2dd75 Iustin Pop
    data = self.in_data
7147 d1c2dd75 Iustin Pop
7148 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(self.disk_template, self.disks)
7149 d1c2dd75 Iustin Pop
7150 27579978 Iustin Pop
    if self.disk_template in constants.DTS_NET_MIRROR:
7151 27579978 Iustin Pop
      self.required_nodes = 2
7152 27579978 Iustin Pop
    else:
7153 27579978 Iustin Pop
      self.required_nodes = 1
7154 d1c2dd75 Iustin Pop
    request = {
7155 d1c2dd75 Iustin Pop
      "type": "allocate",
7156 d1c2dd75 Iustin Pop
      "name": self.name,
7157 d1c2dd75 Iustin Pop
      "disk_template": self.disk_template,
7158 d1c2dd75 Iustin Pop
      "tags": self.tags,
7159 d1c2dd75 Iustin Pop
      "os": self.os,
7160 d1c2dd75 Iustin Pop
      "vcpus": self.vcpus,
7161 d1c2dd75 Iustin Pop
      "memory": self.mem_size,
7162 d1c2dd75 Iustin Pop
      "disks": self.disks,
7163 d1c2dd75 Iustin Pop
      "disk_space_total": disk_space,
7164 d1c2dd75 Iustin Pop
      "nics": self.nics,
7165 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
7166 d1c2dd75 Iustin Pop
      }
7167 d1c2dd75 Iustin Pop
    data["request"] = request
7168 298fe380 Iustin Pop
7169 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
7170 d1c2dd75 Iustin Pop
    """Add relocate instance data to allocator structure.
7171 298fe380 Iustin Pop

7172 d1c2dd75 Iustin Pop
    This in combination with _ComputeClusterData will create the
7173 d1c2dd75 Iustin Pop
    correct structure needed as input for the allocator.
7174 d61df03e Iustin Pop

7175 d1c2dd75 Iustin Pop
    The checks for the completeness of the opcode must have already been
7176 d1c2dd75 Iustin Pop
    done.
7177 d61df03e Iustin Pop

7178 d1c2dd75 Iustin Pop
    """
7179 923ddac0 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(self.name)
7180 27579978 Iustin Pop
    if instance is None:
7181 27579978 Iustin Pop
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
7182 27579978 Iustin Pop
                                   " IAllocator" % self.name)
7183 27579978 Iustin Pop
7184 27579978 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
7185 27579978 Iustin Pop
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")
7186 27579978 Iustin Pop
7187 2a139bb0 Iustin Pop
    if len(instance.secondary_nodes) != 1:
7188 2a139bb0 Iustin Pop
      raise errors.OpPrereqError("Instance has not exactly one secondary node")
7189 2a139bb0 Iustin Pop
7190 27579978 Iustin Pop
    self.required_nodes = 1
7191 dafc7302 Guido Trotter
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
7192 dafc7302 Guido Trotter
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)
7193 27579978 Iustin Pop
7194 d1c2dd75 Iustin Pop
    request = {
7195 2a139bb0 Iustin Pop
      "type": "relocate",
7196 d1c2dd75 Iustin Pop
      "name": self.name,
7197 27579978 Iustin Pop
      "disk_space_total": disk_space,
7198 27579978 Iustin Pop
      "required_nodes": self.required_nodes,
7199 29859cb7 Iustin Pop
      "relocate_from": self.relocate_from,
7200 d1c2dd75 Iustin Pop
      }
7201 27579978 Iustin Pop
    self.in_data["request"] = request
7202 d61df03e Iustin Pop
7203 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
7204 d1c2dd75 Iustin Pop
    """Build input data structures.
7205 d61df03e Iustin Pop

7206 d1c2dd75 Iustin Pop
    """
7207 d1c2dd75 Iustin Pop
    self._ComputeClusterData()
7208 d61df03e Iustin Pop
7209 d1c2dd75 Iustin Pop
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
7210 d1c2dd75 Iustin Pop
      self._AddNewInstance()
7211 d1c2dd75 Iustin Pop
    else:
7212 d1c2dd75 Iustin Pop
      self._AddRelocateInstance()
7213 d61df03e Iustin Pop
7214 d1c2dd75 Iustin Pop
    self.in_text = serializer.Dump(self.in_data)
7215 d61df03e Iustin Pop
7216 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
7217 d1c2dd75 Iustin Pop
    """Run an instance allocator and return the results.
7218 298fe380 Iustin Pop

7219 d1c2dd75 Iustin Pop
    """
7220 72737a7f Iustin Pop
    if call_fn is None:
7221 923ddac0 Michael Hanselmann
      call_fn = self.rpc.call_iallocator_runner
7222 298fe380 Iustin Pop
7223 923ddac0 Michael Hanselmann
    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
7224 4c4e4e1e Iustin Pop
    result.Raise("Failure while running the iallocator script")
7225 8d528b7c Iustin Pop
7226 87f5c298 Iustin Pop
    self.out_text = result.payload
7227 d1c2dd75 Iustin Pop
    if validate:
7228 d1c2dd75 Iustin Pop
      self._ValidateResult()
7229 298fe380 Iustin Pop
7230 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
7231 d1c2dd75 Iustin Pop
    """Process the allocator results.
7232 538475ca Iustin Pop

7233 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
7234 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
7235 538475ca Iustin Pop

7236 d1c2dd75 Iustin Pop
    """
7237 d1c2dd75 Iustin Pop
    try:
7238 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
7239 d1c2dd75 Iustin Pop
    except Exception, err:
7240 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
7241 d1c2dd75 Iustin Pop
7242 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
7243 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
7244 538475ca Iustin Pop
7245 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
7246 d1c2dd75 Iustin Pop
      if key not in rdict:
7247 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
7248 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
7249 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
7250 538475ca Iustin Pop
7251 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
7252 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
7253 d1c2dd75 Iustin Pop
                               " is not a list")
7254 d1c2dd75 Iustin Pop
    self.out_data = rdict
7255 538475ca Iustin Pop
7256 538475ca Iustin Pop
7257 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
7258 d61df03e Iustin Pop
  """Run allocator tests.
7259 d61df03e Iustin Pop

7260 d61df03e Iustin Pop
  This LU runs the allocator tests
7261 d61df03e Iustin Pop

7262 d61df03e Iustin Pop
  """
7263 d61df03e Iustin Pop
  _OP_REQP = ["direction", "mode", "name"]
7264 d61df03e Iustin Pop
7265 d61df03e Iustin Pop
  def CheckPrereq(self):
7266 d61df03e Iustin Pop
    """Check prerequisites.
7267 d61df03e Iustin Pop

7268 d61df03e Iustin Pop
    This checks the opcode parameters depending on the direction and mode.
7269 d61df03e Iustin Pop

7270 d61df03e Iustin Pop
    """
7271 298fe380 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
7272 d61df03e Iustin Pop
      for attr in ["name", "mem_size", "disks", "disk_template",
7273 d61df03e Iustin Pop
                   "os", "tags", "nics", "vcpus"]:
7274 d61df03e Iustin Pop
        if not hasattr(self.op, attr):
7275 d61df03e Iustin Pop
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
7276 d61df03e Iustin Pop
                                     attr)
7277 d61df03e Iustin Pop
      iname = self.cfg.ExpandInstanceName(self.op.name)
7278 d61df03e Iustin Pop
      if iname is not None:
7279 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
7280 d61df03e Iustin Pop
                                   iname)
7281 d61df03e Iustin Pop
      if not isinstance(self.op.nics, list):
7282 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'nics'")
7283 d61df03e Iustin Pop
      for row in self.op.nics:
7284 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
7285 d61df03e Iustin Pop
            "mac" not in row or
7286 d61df03e Iustin Pop
            "ip" not in row or
7287 d61df03e Iustin Pop
            "bridge" not in row):
7288 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
7289 d61df03e Iustin Pop
                                     " 'nics' parameter")
7290 d61df03e Iustin Pop
      if not isinstance(self.op.disks, list):
7291 d61df03e Iustin Pop
        raise errors.OpPrereqError("Invalid parameter 'disks'")
7292 d61df03e Iustin Pop
      for row in self.op.disks:
7293 d61df03e Iustin Pop
        if (not isinstance(row, dict) or
7294 d61df03e Iustin Pop
            "size" not in row or
7295 d61df03e Iustin Pop
            not isinstance(row["size"], int) or
7296 d61df03e Iustin Pop
            "mode" not in row or
7297 d61df03e Iustin Pop
            row["mode"] not in ['r', 'w']):
7298 d61df03e Iustin Pop
          raise errors.OpPrereqError("Invalid contents of the"
7299 d61df03e Iustin Pop
                                     " 'disks' parameter")
7300 8901997e Iustin Pop
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
7301 8cc7e742 Guido Trotter
        self.op.hypervisor = self.cfg.GetHypervisorType()
7302 298fe380 Iustin Pop
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
7303 d61df03e Iustin Pop
      if not hasattr(self.op, "name"):
7304 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
7305 d61df03e Iustin Pop
      fname = self.cfg.ExpandInstanceName(self.op.name)
7306 d61df03e Iustin Pop
      if fname is None:
7307 d61df03e Iustin Pop
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
7308 d61df03e Iustin Pop
                                   self.op.name)
7309 d61df03e Iustin Pop
      self.op.name = fname
7310 29859cb7 Iustin Pop
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
7311 d61df03e Iustin Pop
    else:
7312 d61df03e Iustin Pop
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
7313 d61df03e Iustin Pop
                                 self.op.mode)
7314 d61df03e Iustin Pop
7315 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
7316 298fe380 Iustin Pop
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
7317 d61df03e Iustin Pop
        raise errors.OpPrereqError("Missing allocator name")
7318 298fe380 Iustin Pop
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
7319 d61df03e Iustin Pop
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
7320 d61df03e Iustin Pop
                                 self.op.direction)
7321 d61df03e Iustin Pop
7322 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
7323 d61df03e Iustin Pop
    """Run the allocator test.
7324 d61df03e Iustin Pop

7325 d61df03e Iustin Pop
    """
7326 29859cb7 Iustin Pop
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
7327 923ddac0 Michael Hanselmann
      ial = IAllocator(self.cfg, self.rpc,
7328 29859cb7 Iustin Pop
                       mode=self.op.mode,
7329 29859cb7 Iustin Pop
                       name=self.op.name,
7330 29859cb7 Iustin Pop
                       mem_size=self.op.mem_size,
7331 29859cb7 Iustin Pop
                       disks=self.op.disks,
7332 29859cb7 Iustin Pop
                       disk_template=self.op.disk_template,
7333 29859cb7 Iustin Pop
                       os=self.op.os,
7334 29859cb7 Iustin Pop
                       tags=self.op.tags,
7335 29859cb7 Iustin Pop
                       nics=self.op.nics,
7336 29859cb7 Iustin Pop
                       vcpus=self.op.vcpus,
7337 8cc7e742 Guido Trotter
                       hypervisor=self.op.hypervisor,
7338 29859cb7 Iustin Pop
                       )
7339 29859cb7 Iustin Pop
    else:
7340 923ddac0 Michael Hanselmann
      ial = IAllocator(self.cfg, self.rpc,
7341 29859cb7 Iustin Pop
                       mode=self.op.mode,
7342 29859cb7 Iustin Pop
                       name=self.op.name,
7343 29859cb7 Iustin Pop
                       relocate_from=list(self.relocate_from),
7344 29859cb7 Iustin Pop
                       )
7345 d61df03e Iustin Pop
7346 298fe380 Iustin Pop
    if self.op.direction == constants.IALLOCATOR_DIR_IN:
7347 d1c2dd75 Iustin Pop
      result = ial.in_text
7348 298fe380 Iustin Pop
    else:
7349 d1c2dd75 Iustin Pop
      ial.Run(self.op.allocator, validate=False)
7350 d1c2dd75 Iustin Pop
      result = ial.out_text
7351 298fe380 Iustin Pop
    return result