Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 6dfad215

History | View | Annotate | Download (245 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import time
29 a8083063 Iustin Pop
import tempfile
30 a8083063 Iustin Pop
import re
31 a8083063 Iustin Pop
import platform
32 ffa1c0dc Iustin Pop
import logging
33 74409b12 Iustin Pop
import copy
34 4b7735f9 Iustin Pop
import random
35 a8083063 Iustin Pop
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 6048c986 Guido Trotter
from ganeti import locking
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 8d14b30d Iustin Pop
from ganeti import serializer
45 112f18a5 Iustin Pop
from ganeti import ssconf
46 d61df03e Iustin Pop
47 d61df03e Iustin Pop
48 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  # hooks path and type; a None HPATH means no hooks are run for this LU
  # (see BuildHooksEnv below)
  HPATH = None
  HTYPE = None
  # names of opcode attributes which must be present and not None
  _OP_REQP = []
  # whether this LU must hold the Big Ganeti Lock exclusively
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # default to exclusive (non-shared) locking at every level
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # lazily-built SshRunner; see the 'ssh' property below
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    # verify that every declared-required opcode parameter is present
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    # built on first use only, as it needs the cluster name from the config
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  # NOTE: inside the class namespace this shadows the module-level
  # 'ssh' import; instance attribute access goes through __GetSSH
  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    # NOTE: the lock declaration here is a single name (string), not a list
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really being called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # the recalculation request is consumed; a repeated call needs a new one
    del self.recalculate_locks[locking.LEVEL_NODE]
326 c4a2fee1 Guido Trotter
327 a8083063 Iustin Pop
328 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  # a None HPATH means BuildHooksEnv is never called for this LU
  HPATH = None
  HTYPE = None
337 a8083063 Iustin Pop
338 a8083063 Iustin Pop
339 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: non-empty list of (possibly short) node names to expand
  @rtype: list
  @return: the list of expanded node names, sorted
  @raise errors.OpPrereqError: if the nodes parameter is not a list, or
      if any of the passed node names is not known
  @raise errors.ProgrammerError: if the nodes parameter is empty (callers
      must pass an explicit, non-empty list)

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    # canonicalize each name via the cluster configuration
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)
366 3312b702 Iustin Pop
367 3312b702 Iustin Pop
368 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
369 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
370 3312b702 Iustin Pop

371 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
372 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
373 e4376078 Iustin Pop
  @type instances: list
374 e4376078 Iustin Pop
  @param instances: list of instance names or None for all instances
375 e4376078 Iustin Pop
  @rtype: list
376 e4376078 Iustin Pop
  @return: the list of instances, sorted
377 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if the instances parameter is wrong type
378 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if any of the passed instances is not found
379 3312b702 Iustin Pop

380 3312b702 Iustin Pop
  """
381 3312b702 Iustin Pop
  if not isinstance(instances, list):
382 3312b702 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'")
383 3312b702 Iustin Pop
384 3312b702 Iustin Pop
  if instances:
385 3312b702 Iustin Pop
    wanted = []
386 3312b702 Iustin Pop
387 3312b702 Iustin Pop
    for name in instances:
388 a7ba5e53 Iustin Pop
      instance = lu.cfg.ExpandInstanceName(name)
389 3312b702 Iustin Pop
      if instance is None:
390 3312b702 Iustin Pop
        raise errors.OpPrereqError("No such instance name '%s'" % name)
391 3312b702 Iustin Pop
      wanted.append(instance)
392 3312b702 Iustin Pop
393 3312b702 Iustin Pop
  else:
394 a7f5dc98 Iustin Pop
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
395 a7f5dc98 Iustin Pop
  return wanted
396 dcb93971 Michael Hanselmann
397 dcb93971 Michael Hanselmann
398 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @param selected: the requested output field names to validate
  @raise errors.OpPrereqError: if any selected field matches neither
      the static nor the dynamic field set

  """
  # merge both field sets, then look for requested names matching neither
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))
415 dcb93971 Michael Hanselmann
416 dcb93971 Michael Hanselmann
417 a5961235 Iustin Pop
def _CheckBooleanOpField(op, name):
418 a5961235 Iustin Pop
  """Validates boolean opcode parameters.
419 a5961235 Iustin Pop

420 a5961235 Iustin Pop
  This will ensure that an opcode parameter is either a boolean value,
421 a5961235 Iustin Pop
  or None (but that it always exists).
422 a5961235 Iustin Pop

423 a5961235 Iustin Pop
  """
424 a5961235 Iustin Pop
  val = getattr(op, name, None)
425 a5961235 Iustin Pop
  if not (val is None or isinstance(val, bool)):
426 a5961235 Iustin Pop
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
427 a5961235 Iustin Pop
                               (name, str(val)))
428 a5961235 Iustin Pop
  setattr(op, name, val)
429 a5961235 Iustin Pop
430 a5961235 Iustin Pop
431 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
432 a5961235 Iustin Pop
  """Ensure that a given node is online.
433 a5961235 Iustin Pop

434 a5961235 Iustin Pop
  @param lu: the LU on behalf of which we make the check
435 a5961235 Iustin Pop
  @param node: the node to check
436 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is offline
437 a5961235 Iustin Pop

438 a5961235 Iustin Pop
  """
439 a5961235 Iustin Pop
  if lu.cfg.GetNodeInfo(node).offline:
440 a5961235 Iustin Pop
    raise errors.OpPrereqError("Can't use offline node %s" % node)
441 a5961235 Iustin Pop
442 a5961235 Iustin Pop
443 733a2b6a Iustin Pop
def _CheckNodeNotDrained(lu, node):
444 733a2b6a Iustin Pop
  """Ensure that a given node is not drained.
445 733a2b6a Iustin Pop

446 733a2b6a Iustin Pop
  @param lu: the LU on behalf of which we make the check
447 733a2b6a Iustin Pop
  @param node: the node to check
448 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is drained
449 733a2b6a Iustin Pop

450 733a2b6a Iustin Pop
  """
451 733a2b6a Iustin Pop
  if lu.cfg.GetNodeInfo(node).drained:
452 733a2b6a Iustin Pop
    raise errors.OpPrereqError("Can't use drained node %s" % node)
453 733a2b6a Iustin Pop
454 733a2b6a Iustin Pop
455 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
456 67fc3042 Iustin Pop
                          memory, vcpus, nics, disk_template, disks,
457 67fc3042 Iustin Pop
                          bep, hvp, hypervisor):
458 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
459 e4376078 Iustin Pop

460 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
461 e4376078 Iustin Pop

462 e4376078 Iustin Pop
  @type name: string
463 e4376078 Iustin Pop
  @param name: the name of the instance
464 e4376078 Iustin Pop
  @type primary_node: string
465 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
466 e4376078 Iustin Pop
  @type secondary_nodes: list
467 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
468 e4376078 Iustin Pop
  @type os_type: string
469 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
470 0d68c45d Iustin Pop
  @type status: boolean
471 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
472 e4376078 Iustin Pop
  @type memory: string
473 e4376078 Iustin Pop
  @param memory: the memory size of the instance
474 e4376078 Iustin Pop
  @type vcpus: string
475 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
476 e4376078 Iustin Pop
  @type nics: list
477 e4376078 Iustin Pop
  @param nics: list of tuples (ip, bridge, mac) representing
478 e4376078 Iustin Pop
      the NICs the instance  has
479 2c2690c9 Iustin Pop
  @type disk_template: string
480 2c2690c9 Iustin Pop
  @param disk_template: the distk template of the instance
481 2c2690c9 Iustin Pop
  @type disks: list
482 2c2690c9 Iustin Pop
  @param disks: the list of (size, mode) pairs
483 67fc3042 Iustin Pop
  @type bep: dict
484 67fc3042 Iustin Pop
  @param bep: the backend parameters for the instance
485 67fc3042 Iustin Pop
  @type hvp: dict
486 67fc3042 Iustin Pop
  @param hvp: the hypervisor parameters for the instance
487 67fc3042 Iustin Pop
  @type hypervisor: string
488 67fc3042 Iustin Pop
  @param hypervisor: the hypervisor for the instance
489 e4376078 Iustin Pop
  @rtype: dict
490 e4376078 Iustin Pop
  @return: the hook environment for this instance
491 ecb215b5 Michael Hanselmann

492 396e1b78 Michael Hanselmann
  """
493 0d68c45d Iustin Pop
  if status:
494 0d68c45d Iustin Pop
    str_status = "up"
495 0d68c45d Iustin Pop
  else:
496 0d68c45d Iustin Pop
    str_status = "down"
497 396e1b78 Michael Hanselmann
  env = {
498 0e137c28 Iustin Pop
    "OP_TARGET": name,
499 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
500 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
501 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
502 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
503 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
504 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
505 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
506 2c2690c9 Iustin Pop
    "INSTANCE_DISK_TEMPLATE": disk_template,
507 67fc3042 Iustin Pop
    "INSTANCE_HYPERVISOR": hypervisor,
508 396e1b78 Michael Hanselmann
  }
509 396e1b78 Michael Hanselmann
510 396e1b78 Michael Hanselmann
  if nics:
511 396e1b78 Michael Hanselmann
    nic_count = len(nics)
512 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
513 396e1b78 Michael Hanselmann
      if ip is None:
514 396e1b78 Michael Hanselmann
        ip = ""
515 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
516 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
517 2c2690c9 Iustin Pop
      env["INSTANCE_NIC%d_MAC" % idx] = mac
518 396e1b78 Michael Hanselmann
  else:
519 396e1b78 Michael Hanselmann
    nic_count = 0
520 396e1b78 Michael Hanselmann
521 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
522 396e1b78 Michael Hanselmann
523 2c2690c9 Iustin Pop
  if disks:
524 2c2690c9 Iustin Pop
    disk_count = len(disks)
525 2c2690c9 Iustin Pop
    for idx, (size, mode) in enumerate(disks):
526 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_SIZE" % idx] = size
527 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_MODE" % idx] = mode
528 2c2690c9 Iustin Pop
  else:
529 2c2690c9 Iustin Pop
    disk_count = 0
530 2c2690c9 Iustin Pop
531 2c2690c9 Iustin Pop
  env["INSTANCE_DISK_COUNT"] = disk_count
532 2c2690c9 Iustin Pop
533 67fc3042 Iustin Pop
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
534 67fc3042 Iustin Pop
    for key, value in source.items():
535 67fc3042 Iustin Pop
      env["INSTANCE_%s_%s" % (kind, key)] = value
536 67fc3042 Iustin Pop
537 396e1b78 Michael Hanselmann
  return env
538 396e1b78 Michael Hanselmann
539 396e1b78 Michael Hanselmann
540 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster_info = lu.cfg.GetClusterInfo()
  filled_be = cluster_info.FillBE(instance)
  filled_hv = cluster_info.FillHV(instance)

  # flatten NIC and disk objects into the tuples _BuildInstanceHookEnv expects
  nic_list = [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics]
  disk_list = [(disk.size, disk.mode) for disk in instance.disks]

  hook_args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': filled_be[constants.BE_MEMORY],
    'vcpus': filled_be[constants.BE_VCPUS],
    'nics': nic_list,
    'disk_template': instance.disk_template,
    'disks': disk_list,
    'bep': filled_be,
    'hvp': filled_hv,
    'hypervisor': instance.hypervisor,
  }
  if override:
    hook_args.update(override)
  return _BuildInstanceHookEnv(**hook_args)
576 396e1b78 Michael Hanselmann
577 396e1b78 Michael Hanselmann
578 ec0292f1 Iustin Pop
def _AdjustCandidatePool(lu):
579 ec0292f1 Iustin Pop
  """Adjust the candidate pool after node operations.
580 ec0292f1 Iustin Pop

581 ec0292f1 Iustin Pop
  """
582 ec0292f1 Iustin Pop
  mod_list = lu.cfg.MaintainCandidatePool()
583 ec0292f1 Iustin Pop
  if mod_list:
584 ec0292f1 Iustin Pop
    lu.LogInfo("Promoted nodes to master candidate role: %s",
585 ee513a66 Iustin Pop
               ", ".join(node.name for node in mod_list))
586 ec0292f1 Iustin Pop
    for name in mod_list:
587 ec0292f1 Iustin Pop
      lu.context.ReaddNode(name)
588 ec0292f1 Iustin Pop
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
589 ec0292f1 Iustin Pop
  if mc_now > mc_max:
590 ec0292f1 Iustin Pop
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
591 ec0292f1 Iustin Pop
               (mc_now, mc_max))
592 ec0292f1 Iustin Pop
593 ec0292f1 Iustin Pop
594 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  Queries the instance's primary node for all bridges referenced by
  the instance's NICs.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose NIC bridges are checked
  @raise errors.OpPrereqError: if one or more bridges are missing on
      the instance's primary node

  """
  # check bridges existence on the primary node
  brlist = [nic.bridge for nic in instance.nics]
  result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
  # Raise() surfaces RPC-level failures; a falsy payload means at least
  # one bridge is missing
  result.Raise()
  if not result.data:
    raise errors.OpPrereqError("One or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))
606 bf6929a2 Alexander Schreiber
607 bf6929a2 Alexander Schreiber
608 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  Refuses to run unless the master is the only remaining node and no
  instances are defined.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master_name = self.cfg.GetMasterNode()

    node_names = self.cfg.GetNodeList()
    if not (len(node_names) == 1 and node_names[0] == master_name):
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(node_names) - 1))

    instance_names = self.cfg.GetInstanceList()
    if instance_names:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instance_names))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master_name = self.cfg.GetMasterNode()

    stop_result = self.rpc.call_node_stop_master(master_name, False)
    stop_result.Raise()
    if not stop_result.data:
      raise errors.OpExecError("Could not disable the master role")

    # keep backup copies of the Ganeti SSH key pair before it goes away
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)

    return master_name
646 a8083063 Iustin Pop
647 a8083063 Iustin Pop
648 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  Runs a series of consistency checks over the configuration, nodes,
  instances, volumes and N+1 memory redundancy (see the individual
  _Verify* methods).

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  # the opcode must provide 'skip_checks'; validated in CheckPrereq
  _OP_REQP = ["skip_checks"]
  # per-level locking is set up in ExpandNames instead
  REQ_BGL = False
657 d4b9d97f Guido Trotter
  def ExpandNames(self):
    """Request locks on every node and every instance.

    All locking levels are acquired in shared mode.

    """
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
      }
    # mark every level as shared (1)
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
663 a8083063 Iustin Pop
664 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
665 6d2e83d5 Iustin Pop
                  node_result, feedback_fn, master_files,
666 cc9e1230 Guido Trotter
                  drbd_map, vg_name):
667 a8083063 Iustin Pop
    """Run multiple tests against a node.
668 a8083063 Iustin Pop

669 112f18a5 Iustin Pop
    Test list:
670 e4376078 Iustin Pop

671 a8083063 Iustin Pop
      - compares ganeti version
672 a8083063 Iustin Pop
      - checks vg existance and size > 20G
673 a8083063 Iustin Pop
      - checks config file checksum
674 a8083063 Iustin Pop
      - checks ssh to other nodes
675 a8083063 Iustin Pop

676 112f18a5 Iustin Pop
    @type nodeinfo: L{objects.Node}
677 112f18a5 Iustin Pop
    @param nodeinfo: the node to check
678 e4376078 Iustin Pop
    @param file_list: required list of files
679 e4376078 Iustin Pop
    @param local_cksum: dictionary of local files and their checksums
680 e4376078 Iustin Pop
    @param node_result: the results from the node
681 e4376078 Iustin Pop
    @param feedback_fn: function used to accumulate results
682 112f18a5 Iustin Pop
    @param master_files: list of files that only masters should have
683 6d2e83d5 Iustin Pop
    @param drbd_map: the useddrbd minors for this node, in
684 6d2e83d5 Iustin Pop
        form of minor: (instance, must_exist) which correspond to instances
685 6d2e83d5 Iustin Pop
        and their running status
686 cc9e1230 Guido Trotter
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
687 098c0958 Michael Hanselmann

688 a8083063 Iustin Pop
    """
689 112f18a5 Iustin Pop
    node = nodeinfo.name
690 25361b9a Iustin Pop
691 25361b9a Iustin Pop
    # main result, node_result should be a non-empty dict
692 25361b9a Iustin Pop
    if not node_result or not isinstance(node_result, dict):
693 25361b9a Iustin Pop
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
694 25361b9a Iustin Pop
      return True
695 25361b9a Iustin Pop
696 a8083063 Iustin Pop
    # compares ganeti version
697 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
698 25361b9a Iustin Pop
    remote_version = node_result.get('version', None)
699 e9ce0a64 Iustin Pop
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
700 e9ce0a64 Iustin Pop
            len(remote_version) == 2):
701 c840ae6f Guido Trotter
      feedback_fn("  - ERROR: connection to %s failed" % (node))
702 a8083063 Iustin Pop
      return True
703 a8083063 Iustin Pop
704 e9ce0a64 Iustin Pop
    if local_version != remote_version[0]:
705 e9ce0a64 Iustin Pop
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
706 e9ce0a64 Iustin Pop
                  " node %s %s" % (local_version, node, remote_version[0]))
707 a8083063 Iustin Pop
      return True
708 a8083063 Iustin Pop
709 e9ce0a64 Iustin Pop
    # node seems compatible, we can actually try to look into its results
710 a8083063 Iustin Pop
711 a8083063 Iustin Pop
    bad = False
712 e9ce0a64 Iustin Pop
713 e9ce0a64 Iustin Pop
    # full package version
714 e9ce0a64 Iustin Pop
    if constants.RELEASE_VERSION != remote_version[1]:
715 e9ce0a64 Iustin Pop
      feedback_fn("  - WARNING: software version mismatch: master %s,"
716 e9ce0a64 Iustin Pop
                  " node %s %s" %
717 e9ce0a64 Iustin Pop
                  (constants.RELEASE_VERSION, node, remote_version[1]))
718 e9ce0a64 Iustin Pop
719 e9ce0a64 Iustin Pop
    # checks vg existence and size > 20G
720 cc9e1230 Guido Trotter
    if vg_name is not None:
721 cc9e1230 Guido Trotter
      vglist = node_result.get(constants.NV_VGLIST, None)
722 cc9e1230 Guido Trotter
      if not vglist:
723 cc9e1230 Guido Trotter
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
724 cc9e1230 Guido Trotter
                        (node,))
725 a8083063 Iustin Pop
        bad = True
726 cc9e1230 Guido Trotter
      else:
727 cc9e1230 Guido Trotter
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
728 cc9e1230 Guido Trotter
                                              constants.MIN_VG_SIZE)
729 cc9e1230 Guido Trotter
        if vgstatus:
730 cc9e1230 Guido Trotter
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
731 cc9e1230 Guido Trotter
          bad = True
732 a8083063 Iustin Pop
733 a8083063 Iustin Pop
    # checks config file checksum
734 a8083063 Iustin Pop
735 25361b9a Iustin Pop
    remote_cksum = node_result.get(constants.NV_FILELIST, None)
736 25361b9a Iustin Pop
    if not isinstance(remote_cksum, dict):
737 a8083063 Iustin Pop
      bad = True
738 a8083063 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
739 a8083063 Iustin Pop
    else:
740 a8083063 Iustin Pop
      for file_name in file_list:
741 112f18a5 Iustin Pop
        node_is_mc = nodeinfo.master_candidate
742 112f18a5 Iustin Pop
        must_have_file = file_name not in master_files
743 a8083063 Iustin Pop
        if file_name not in remote_cksum:
744 112f18a5 Iustin Pop
          if node_is_mc or must_have_file:
745 112f18a5 Iustin Pop
            bad = True
746 112f18a5 Iustin Pop
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
747 a8083063 Iustin Pop
        elif remote_cksum[file_name] != local_cksum[file_name]:
748 112f18a5 Iustin Pop
          if node_is_mc or must_have_file:
749 112f18a5 Iustin Pop
            bad = True
750 112f18a5 Iustin Pop
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
751 112f18a5 Iustin Pop
          else:
752 112f18a5 Iustin Pop
            # not candidate and this is not a must-have file
753 112f18a5 Iustin Pop
            bad = True
754 112f18a5 Iustin Pop
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
755 112f18a5 Iustin Pop
                        " '%s'" % file_name)
756 112f18a5 Iustin Pop
        else:
757 112f18a5 Iustin Pop
          # all good, except non-master/non-must have combination
758 112f18a5 Iustin Pop
          if not node_is_mc and not must_have_file:
759 112f18a5 Iustin Pop
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
760 112f18a5 Iustin Pop
                        " candidates" % file_name)
761 a8083063 Iustin Pop
762 25361b9a Iustin Pop
    # checks ssh to any
763 25361b9a Iustin Pop
764 25361b9a Iustin Pop
    if constants.NV_NODELIST not in node_result:
765 a8083063 Iustin Pop
      bad = True
766 9d4bfc96 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
767 a8083063 Iustin Pop
    else:
768 25361b9a Iustin Pop
      if node_result[constants.NV_NODELIST]:
769 a8083063 Iustin Pop
        bad = True
770 25361b9a Iustin Pop
        for node in node_result[constants.NV_NODELIST]:
771 9d4bfc96 Iustin Pop
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
772 25361b9a Iustin Pop
                          (node, node_result[constants.NV_NODELIST][node]))
773 25361b9a Iustin Pop
774 25361b9a Iustin Pop
    if constants.NV_NODENETTEST not in node_result:
775 9d4bfc96 Iustin Pop
      bad = True
776 9d4bfc96 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
777 9d4bfc96 Iustin Pop
    else:
778 25361b9a Iustin Pop
      if node_result[constants.NV_NODENETTEST]:
779 9d4bfc96 Iustin Pop
        bad = True
780 25361b9a Iustin Pop
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
781 9d4bfc96 Iustin Pop
        for node in nlist:
782 9d4bfc96 Iustin Pop
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
783 25361b9a Iustin Pop
                          (node, node_result[constants.NV_NODENETTEST][node]))
784 9d4bfc96 Iustin Pop
785 25361b9a Iustin Pop
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
786 e69d05fd Iustin Pop
    if isinstance(hyp_result, dict):
787 e69d05fd Iustin Pop
      for hv_name, hv_result in hyp_result.iteritems():
788 e69d05fd Iustin Pop
        if hv_result is not None:
789 e69d05fd Iustin Pop
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
790 e69d05fd Iustin Pop
                      (hv_name, hv_result))
791 6d2e83d5 Iustin Pop
792 6d2e83d5 Iustin Pop
    # check used drbd list
793 cc9e1230 Guido Trotter
    if vg_name is not None:
794 cc9e1230 Guido Trotter
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
795 cc9e1230 Guido Trotter
      if not isinstance(used_minors, (tuple, list)):
796 cc9e1230 Guido Trotter
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
797 cc9e1230 Guido Trotter
                    str(used_minors))
798 cc9e1230 Guido Trotter
      else:
799 cc9e1230 Guido Trotter
        for minor, (iname, must_exist) in drbd_map.items():
800 cc9e1230 Guido Trotter
          if minor not in used_minors and must_exist:
801 35e994e9 Iustin Pop
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
802 35e994e9 Iustin Pop
                        " not active" % (minor, iname))
803 cc9e1230 Guido Trotter
            bad = True
804 cc9e1230 Guido Trotter
        for minor in used_minors:
805 cc9e1230 Guido Trotter
          if minor not in drbd_map:
806 35e994e9 Iustin Pop
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
807 35e994e9 Iustin Pop
                        minor)
808 cc9e1230 Guido Trotter
            bad = True
809 6d2e83d5 Iustin Pop
810 a8083063 Iustin Pop
    return bad
811 a8083063 Iustin Pop
812 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    has_errors = False

    primary = instanceconfig.primary_node

    expected_vols = {}
    instanceconfig.MapLVsByNode(expected_vols)

    # every expected LV must be present on its node
    for nname, volumes in expected_vols.items():
      if nname in n_offline:
        # ignore missing volumes on offline nodes
        continue
      present = node_vol_is.get(nname, {})
      for vol in volumes:
        if vol not in present:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (vol, nname))
          has_errors = True

    # an instance marked up must actually run on its (online) primary
    if instanceconfig.admin_up:
      running_on_primary = (primary in node_instance and
                            instance in node_instance[primary])
      if not running_on_primary and primary not in n_offline:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, primary))
        has_errors = True

    # the instance must not run anywhere but its primary
    for nname in node_instance:
      if nname != primary and instance in node_instance[nname]:
        feedback_fn("  - ERROR: instance %s should not run on node %s" %
                        (instance, nname))
        has_errors = True

    return has_errors
853 a8083063 Iustin Pop
854 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    found_orphans = False

    for nname, volumes in node_vol_is.items():
      expected = node_vol_should.get(nname, {})
      for vol in volumes:
        if vol not in expected:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (vol, nname))
          found_orphans = True
    return found_orphans
870 a8083063 Iustin Pop
871 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    orphans_found = False
    for nname, running in node_instance.items():
      for iname in running:
        if iname in instancelist:
          continue
        feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                        (iname, nname))
        orphans_found = True
    return orphans_found
885 a8083063 Iustin Pop
886 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    @param node_info: dict of node name to a dict with at least the keys
        'mfree' and 'sinst-by-pnode' (secondary instances grouped by their
        primary node)
    @param instance_cfg: dict of instance name to instance configuration
    @param feedback_fn: function used to accumulate results
    @return: True if any node lacks the memory to absorb a failover

    """
    bad = False
    # the cluster info is invariant over the loops below; fetch it once
    # instead of once per instance (the original re-queried it every time)
    cluster = self.cfg.GetClusterInfo()

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = cluster.FillBE(instance_cfg[instance])
          # only auto-balanced instances count against the N+1 budget
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
915 2b3b6ddd Guido Trotter
916 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    # anything outside the optional-checks set is not skippable
    unknown_checks = self.skip_set - constants.VERIFY_OPTIONAL_CHECKS
    if unknown_checks:
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
926 a8083063 Iustin Pop
927 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run in the post phase; their failure is
    logged in the verify output and makes the verification fail.

    """
    node_names = self.cfg.GetNodeList()
    hook_env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags()),
      }
    # one NODE_TAGS_<name> variable per node, holding its tag list
    hook_env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
                    for node in self.cfg.GetAllNodesInfo().values())

    return hook_env, [], node_names
942 d8fff41c Guido Trotter
943 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
944 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
945 a8083063 Iustin Pop

946 a8083063 Iustin Pop
    """
947 a8083063 Iustin Pop
    bad = False
948 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
949 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
950 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
951 a8083063 Iustin Pop
952 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
953 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
954 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
955 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
956 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
957 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
958 6d2e83d5 Iustin Pop
                        for iname in instancelist)
959 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
960 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
961 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
962 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
963 a8083063 Iustin Pop
    node_volume = {}
964 a8083063 Iustin Pop
    node_instance = {}
965 9c9c7d30 Guido Trotter
    node_info = {}
966 26b6af5e Guido Trotter
    instance_cfg = {}
967 a8083063 Iustin Pop
968 a8083063 Iustin Pop
    # FIXME: verify OS list
969 a8083063 Iustin Pop
    # do local checksums
970 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
971 112f18a5 Iustin Pop
972 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
973 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
974 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
975 112f18a5 Iustin Pop
    file_names.extend(master_files)
976 112f18a5 Iustin Pop
977 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
978 a8083063 Iustin Pop
979 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
980 a8083063 Iustin Pop
    node_verify_param = {
981 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
982 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
983 82e37788 Iustin Pop
                              if not node.offline],
984 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
985 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
986 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
987 82e37788 Iustin Pop
                                 if not node.offline],
988 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
989 25361b9a Iustin Pop
      constants.NV_VERSION: None,
990 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
991 a8083063 Iustin Pop
      }
992 cc9e1230 Guido Trotter
    if vg_name is not None:
993 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
994 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
995 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
996 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
997 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
998 a8083063 Iustin Pop
999 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1000 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1001 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1002 6d2e83d5 Iustin Pop
1003 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1004 112f18a5 Iustin Pop
      node = node_i.name
1005 25361b9a Iustin Pop
      nresult = all_nvinfo[node].data
1006 25361b9a Iustin Pop
1007 0a66c968 Iustin Pop
      if node_i.offline:
1008 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
1009 0a66c968 Iustin Pop
        n_offline.append(node)
1010 0a66c968 Iustin Pop
        continue
1011 0a66c968 Iustin Pop
1012 112f18a5 Iustin Pop
      if node == master_node:
1013 25361b9a Iustin Pop
        ntype = "master"
1014 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1015 25361b9a Iustin Pop
        ntype = "master candidate"
1016 22f0f71d Iustin Pop
      elif node_i.drained:
1017 22f0f71d Iustin Pop
        ntype = "drained"
1018 22f0f71d Iustin Pop
        n_drained.append(node)
1019 112f18a5 Iustin Pop
      else:
1020 25361b9a Iustin Pop
        ntype = "regular"
1021 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1022 25361b9a Iustin Pop
1023 25361b9a Iustin Pop
      if all_nvinfo[node].failed or not isinstance(nresult, dict):
1024 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
1025 25361b9a Iustin Pop
        bad = True
1026 25361b9a Iustin Pop
        continue
1027 25361b9a Iustin Pop
1028 6d2e83d5 Iustin Pop
      node_drbd = {}
1029 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1030 c614e5fb Iustin Pop
        if instance not in instanceinfo:
1031 c614e5fb Iustin Pop
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
1032 c614e5fb Iustin Pop
                      instance)
1033 c614e5fb Iustin Pop
          # ghost instance should not be running, but otherwise we
1034 c614e5fb Iustin Pop
          # don't give double warnings (both ghost instance and
1035 c614e5fb Iustin Pop
          # unallocated minor in use)
1036 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1037 c614e5fb Iustin Pop
        else:
1038 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1039 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
1040 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
1041 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
1042 cc9e1230 Guido Trotter
                                node_drbd, vg_name)
1043 a8083063 Iustin Pop
      bad = bad or result
1044 a8083063 Iustin Pop
1045 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1046 cc9e1230 Guido Trotter
      if vg_name is None:
1047 cc9e1230 Guido Trotter
        node_volume[node] = {}
1048 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1049 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1050 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
1051 b63ed789 Iustin Pop
        bad = True
1052 b63ed789 Iustin Pop
        node_volume[node] = {}
1053 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1054 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1055 a8083063 Iustin Pop
        bad = True
1056 a8083063 Iustin Pop
        continue
1057 b63ed789 Iustin Pop
      else:
1058 25361b9a Iustin Pop
        node_volume[node] = lvdata
1059 a8083063 Iustin Pop
1060 a8083063 Iustin Pop
      # node_instance
1061 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1062 25361b9a Iustin Pop
      if not isinstance(idata, list):
1063 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1064 25361b9a Iustin Pop
                    (node,))
1065 a8083063 Iustin Pop
        bad = True
1066 a8083063 Iustin Pop
        continue
1067 a8083063 Iustin Pop
1068 25361b9a Iustin Pop
      node_instance[node] = idata
1069 a8083063 Iustin Pop
1070 9c9c7d30 Guido Trotter
      # node_info
1071 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1072 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1073 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1074 9c9c7d30 Guido Trotter
        bad = True
1075 9c9c7d30 Guido Trotter
        continue
1076 9c9c7d30 Guido Trotter
1077 9c9c7d30 Guido Trotter
      try:
1078 9c9c7d30 Guido Trotter
        node_info[node] = {
1079 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1080 93e4c50b Guido Trotter
          "pinst": [],
1081 93e4c50b Guido Trotter
          "sinst": [],
1082 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1083 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1084 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1085 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1086 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1087 36e7da50 Guido Trotter
          # secondary.
1088 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1089 9c9c7d30 Guido Trotter
        }
1090 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1091 cc9e1230 Guido Trotter
        if vg_name is not None:
1092 9a198532 Iustin Pop
          if (constants.NV_VGLIST not in nresult or
1093 9a198532 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST]):
1094 9a198532 Iustin Pop
            feedback_fn("  - ERROR: node %s didn't return data for the"
1095 9a198532 Iustin Pop
                        " volume group '%s' - it is either missing or broken" %
1096 9a198532 Iustin Pop
                        (node, vg_name))
1097 9a198532 Iustin Pop
            bad = True
1098 9a198532 Iustin Pop
            continue
1099 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1100 9a198532 Iustin Pop
      except (ValueError, KeyError):
1101 9a198532 Iustin Pop
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
1102 9a198532 Iustin Pop
                    " from node %s" % (node,))
1103 9c9c7d30 Guido Trotter
        bad = True
1104 9c9c7d30 Guido Trotter
        continue
1105 9c9c7d30 Guido Trotter
1106 a8083063 Iustin Pop
    node_vol_should = {}
1107 a8083063 Iustin Pop
1108 a8083063 Iustin Pop
    for instance in instancelist:
1109 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1110 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1111 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1112 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1113 c5705f58 Guido Trotter
      bad = bad or result
1114 832261fd Iustin Pop
      inst_nodes_offline = []
1115 a8083063 Iustin Pop
1116 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1117 a8083063 Iustin Pop
1118 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1119 26b6af5e Guido Trotter
1120 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1121 93e4c50b Guido Trotter
      if pnode in node_info:
1122 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1123 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1124 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1125 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1126 93e4c50b Guido Trotter
        bad = True
1127 93e4c50b Guido Trotter
1128 832261fd Iustin Pop
      if pnode in n_offline:
1129 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1130 832261fd Iustin Pop
1131 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1132 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1133 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1134 93e4c50b Guido Trotter
      # supported either.
1135 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1136 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1137 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1138 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1139 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1140 93e4c50b Guido Trotter
                    % instance)
1141 93e4c50b Guido Trotter
1142 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1143 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1144 3924700f Iustin Pop
1145 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1146 93e4c50b Guido Trotter
        if snode in node_info:
1147 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1148 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1149 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1150 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1151 0a66c968 Iustin Pop
        elif snode not in n_offline:
1152 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1153 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1154 832261fd Iustin Pop
          bad = True
1155 832261fd Iustin Pop
        if snode in n_offline:
1156 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1157 832261fd Iustin Pop
1158 832261fd Iustin Pop
      if inst_nodes_offline:
1159 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1160 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1161 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1162 832261fd Iustin Pop
        bad = True
1163 93e4c50b Guido Trotter
1164 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1165 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1166 a8083063 Iustin Pop
                                       feedback_fn)
1167 a8083063 Iustin Pop
    bad = bad or result
1168 a8083063 Iustin Pop
1169 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1170 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1171 a8083063 Iustin Pop
                                         feedback_fn)
1172 a8083063 Iustin Pop
    bad = bad or result
1173 a8083063 Iustin Pop
1174 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1175 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1176 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1177 e54c4c5e Guido Trotter
      bad = bad or result
1178 2b3b6ddd Guido Trotter
1179 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1180 2b3b6ddd Guido Trotter
    if i_non_redundant:
1181 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1182 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1183 2b3b6ddd Guido Trotter
1184 3924700f Iustin Pop
    if i_non_a_balanced:
1185 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1186 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1187 3924700f Iustin Pop
1188 0a66c968 Iustin Pop
    if n_offline:
1189 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1190 0a66c968 Iustin Pop
1191 22f0f71d Iustin Pop
    if n_drained:
1192 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1193 22f0f71d Iustin Pop
1194 34290825 Michael Hanselmann
    return not bad
1195 a8083063 Iustin Pop
1196 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        # the multi-node rpc call returned nothing at all
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          # the per-node header is printed lazily, only before the first
          # failing hook of that node
          show_node_header = True
          res = hooks_results[node_name]
          # a failed rpc, a False payload or a non-list payload all mean
          # the node's hook results are unusable
          if res.failed or res.data is False or not isinstance(res.data, list):
            if res.offline:
              # no need to warn or set fail return value
              continue
            feedback_fn("    Communication failure in hooks execution")
            lu_result = 1
            continue
          # res.data is a list of (script, status, output) tuples
          for script, hkr, output in res.data:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              # prefix every output line so it nests under the header
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      # NOTE(review): this return sits inside the POST-phase branch, so in
      # the PRE phase the method implicitly returns None rather than
      # lu_result -- looks unintended, confirm against the LU contract
      return lu_result
1244 d8fff41c Guido Trotter
1245 a8083063 Iustin Pop
1246 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # all nodes and instances, but in shared mode only: this LU just
    # reads the configuration and queries the nodes
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @return: a 4-element tuple of (list of nodes which returned invalid
        LV data, dict of node name to LVM error message, list of
        instance names having offline LVs, dict of instance name to
        list of missing (node, volume) pairs)

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # map of (node, volume) -> owning instance, for all volumes we care about
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      # only running, network-mirrored instances are relevant here
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      # nothing to verify, return the (empty) result structure
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]
      if lvs.failed:
        if not lvs.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, lvs.data))
        continue
      lvs = lvs.data
      if isinstance(lvs, basestring):
        # a string payload denotes an LVM-level error on the node
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      # the dict values are (allocatable, inactive, online) triples; only
      # the online flag is used here
      for lv_name, (_, _, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1330 2c95a8d4 Iustin Pop
1331 2c95a8d4 Iustin Pop
1332 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run only on the master node, both pre and post.

    """
    mn = self.cfg.GetMasterNode()
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)
    new_name = hostname.name
    new_ip = hostname.ip
    self.ip = new_ip

    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()

    # at least one of name/IP must actually change
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")

    # a new IP must not already be live on the network
    if (new_ip != old_ip and
        utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT)):
      raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                 " reachable on the network. Aborting." %
                                 new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    new_name = self.op.name
    new_ip = self.ip
    master = self.cfg.GetMasterNode()

    # shutdown the master IP
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = new_name
      cluster.master_ip = new_ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      if master in node_list:
        node_list.remove(master)
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)
    finally:
      # always try to restart the master role, even if the rename failed
      result = self.rpc.call_node_start_master(master, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")
1410 07bd8a51 Iustin Pop
1411 07bd8a51 Iustin Pop
1412 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check whether the given disk or any of its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  for child in disk.children or []:
    if _RecursiveCheckIfLVMBased(child):
      return True
  return disk.dev_type == constants.LD_LV
1426 8084f9f6 Manuel Franceschini
1427 8084f9f6 Manuel Franceschini
1428 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    Normalizes candidate_pool_size to either None (not requested) or a
    positive integer; raises OpPrereqError on invalid values.

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except (ValueError, TypeError), err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    # shared node locks suffice: the nodes are only queried, not modified
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run only on the master node, both pre and post.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    Side effects: stores the validated new settings on self
    (self.cluster, self.new_beparams, self.new_hvparams, self.hv_list)
    for Exec to apply.

    """
    # vg_name set to a false-y (but not None) value means "disable lvm
    # storage", which is only allowed if no lvm-based instances exist
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      # merge the requested changes over the current cluster defaults
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    Applies the settings validated/prepared by CheckPrereq and saves
    the updated cluster configuration.

    """
    if self.op.vg_name is not None:
      # normalize a false-y vg_name (disable lvm) to None
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size

    self.cfg.Update(self.cluster)

    # we want to update nodes after the cluster so that if any errors
    # happen, we have recorded and saved the cluster info
    if self.op.candidate_pool_size is not None:
      _AdjustCandidatePool(self)
1566 4b7735f9 Iustin Pop
1567 8084f9f6 Manuel Franceschini
1568 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # all node locks, but shared only: the configuration is merely
    # re-saved, not modified
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    There are no prerequisites for this LU.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    Re-saving the current cluster object forces a push of the
    configuration to all nodes.

    """
    cluster = self.cfg.GetClusterInfo()
    self.cfg.Update(cluster)
1593 afee0879 Iustin Pop
1594 afee0879 Iustin Pop
1595 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Polls the instance's primary node via the blockdev_getmirrorstatus
  RPC until no disk reports a remaining sync percentage, logging
  progress through lu.proc.

  @param lu: the logical unit on whose behalf we operate (used for
      cfg, rpc and logging access)
  @param instance: the instance whose disks we wait on
  @param oneshot: if True, poll only once and skip the initial
      "waiting" log message
  @param unlock: unused in this function body -- TODO confirm whether
      callers still pass it for a reason
  @return: True if no disk is degraded at the end, False otherwise

  """
  if not instance.disks:
    # nothing to sync for a diskless instance
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  # point the disk objects at the primary node before issuing the RPC
  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      # RPC-level failure: retry up to 10 times before giving up
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # a disk counts as cumulatively degraded only when it is degraded
      # and reports no sync progress at all
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    # sleep proportionally to the longest estimate, capped at a minute
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1662 a8083063 Iustin Pop
1663 a8083063 Iustin Pop
1664 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  @param lu: the logical unit on whose behalf we operate
  @param dev: the disk object to check (children are checked
      recursively)
  @param node: the node on which to look for the disk
  @param on_primary: whether the disk is expected assembled on its
      primary node; if False, only disks that assemble on secondaries
      are queried
  @param ldisk: select payload index 6 (local disk status) instead of
      index 5 (is_degraded)
  @return: True if the device (and all its children) is consistent,
      False otherwise

  """
  lu.cfg.SetDiskID(dev, node)
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.RemoteFailMsg()
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      # payload[idx] is truthy when degraded, so invert it
      result = result and (not rstats.payload[idx])
  if dev.children:
    for child in dev.children:
      # NOTE(review): ldisk is not propagated to the recursive call, so
      # children are always checked via is_degraded -- confirm intended
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result
1695 a8083063 Iustin Pop
1696 a8083063 Iustin Pop
1697 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  # no static fields: everything must be fetched from the nodes
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    Nothing to check beyond what ExpandNames already validated.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into an a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].failed]
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in good_nodes:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    Queries all online nodes and builds one output row per OS name,
    with the columns requested in self.op.output_fields.

    """
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    # NOTE(review): call_os_diagnose appears to return a per-node map,
    # so this == False guard looks unreachable -- confirm before removal
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # valid only if every node returned a non-empty, valid OS list
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1788 a8083063 Iustin Pop
1789 a8083063 Iustin Pop
1790 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    @return: (env dict, pre-phase node list, post-phase node list);
        the target node is excluded from both node lists

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # fixed: use the call form of raise instead of the deprecated
      # "raise Class, args" statement syntax used elsewhere nowhere in
      # this file
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    # store the canonical (expanded) name and the node object for Exec
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    Removes the node from the cluster configuration, tells it to leave
    the cluster, and rebalances the master-candidate pool.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)
1857 eb1742d5 Guido Trotter
1858 a8083063 Iustin Pop
1859 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields that require a live query of the nodes
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  # fields answerable from the configuration alone
  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # node locks are only taken when dynamic fields are requested and
    # the caller asked for locking
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    @return: a list of rows, one per node, each row holding the values
        of self.op.output_fields in order

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      # the lock set is authoritative about which nodes exist
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          # convert what we can to int, leaving missing values as None
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    # only walk the instance list if an instance-related field was asked
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif field == "drained":
          val = node.drained
        elif self._FIELDS_DYNAMIC.Matches(field):
          # dynamic fields fall back to None when the node query failed
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
2015 a8083063 Iustin Pop
2016 a8083063 Iustin Pop
2017 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    # no explicit node list means "all nodes"
    if not self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # the acquired locks define the final node list
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    @return: a list of rows, one per volume, with all values
        stringified; nodes whose volume RPC failed are skipped

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    # map each instance to its per-node LV dict, for the "instance" field
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      if node not in volumes or volumes[node].failed or not volumes[node].data:
        continue

      # copy before sorting so the RPC result is left untouched
      node_vols = volumes[node].data[:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              # for/else: no instance owns this volume
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
2096 dcb93971 Michael Hanselmann
2097 dcb93971 Michael Hanselmann
2098 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    # the post hooks additionally run on the node being added
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the node name; the resolved name/IP are authoritative below
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # single-homed setup: the secondary address equals the primary one
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # a readded node must keep its previous IP configuration
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      # neither of the new node's addresses may clash with an existing node
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # auto-promote the new node to master candidate while the candidate
    # pool is not yet full
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    mc_now, _ = self.cfg.GetMasterCandidateStats()
    master_candidate = mc_now < cp_size

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip,
                                 master_candidate=master_candidate,
                                 offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # check connectivity; the version RPC doubles as a liveness check
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host keys (dsa/rsa, private/public) plus the cluster ssh user's keys
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot transfer ssh keys to the"
                               " new node: %s" % msg)

    # Add node to our /etc/hosts, and add key to known_hosts
    utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # dual-homed node: verify it actually owns its secondary address
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # ask the master node to verify ssh/hostname access to the new node
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        # best-effort distribution: log failures but do not abort
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    if constants.HTS_COPY_VNC_PASSWORD.intersection(enabled_hypervisors):
      to_copy.append(constants.VNC_PASSWORD_FILE)

    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      # check the rpc payload (.data) rather than the result wrapper, for
      # consistency with the upload check in the distribution loop above;
      # previously ``not result[node]`` tested the wrapper object itself
      if result[node].failed or not result[node].data:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
    else:
      self.context.AddNode(new_node)
2309 a8083063 Iustin Pop
2310 a8083063 Iustin Pop
2311 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  The modifiable parameters are the master_candidate, offline and drained
  flags; at most one of them may be set to True in a single invocation.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    # canonicalize the node name first; all later checks use the result
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    # normalize the three flags; each ends up as True, False or None (unset)
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      # no modification was requested at all
      raise errors.OpPrereqError("Please pass at least one modification")
    if all_mods.count(True) > 1:
      # the three states are mutually exclusive
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    # setting offline/drained, or explicitly clearing master_candidate,
    # demotes a current master candidate
    if ((self.op.master_candidate == False or self.op.offline == True or
         self.op.drained == True) and node.master_candidate):
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        # the master node itself can never be demoted
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate, online and not drained")
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        # demoting would shrink the candidate pool below the configured
        # size; only a warning when the operation is forced
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    # promotion to master candidate requires the node to be (or become)
    # neither offline nor drained
    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    """
    node = self.node

    # list of (parameter, new value) pairs, returned to the caller
    result = []
    # whether the master-candidate status changed (triggers ReaddNode below)
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        # going offline auto-demotes a candidate and clears drained
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        # tell the node itself to drop its candidate role; failure is
        # only a warning since the config change below still applies
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        # draining auto-demotes a candidate and clears offline
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result
2436 b31c8676 Iustin Pop
2437 b31c8676 Iustin Pop
2438 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  A read-only operation: it acquires no locks and only reports values
  from the cluster configuration and compile-time constants.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # nothing to lock for a pure read
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    # restrict the reported hypervisor parameters to enabled hypervisors
    hv_params = dict((hv_name, cluster.hvparams[hv_name])
                     for hv_name in cluster.enabled_hypervisors)
    return {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": hv_params,
      "beparams": cluster.beparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "default_bridge": cluster.default_bridge,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      }
2481 a8083063 Iustin Pop
2482 a8083063 Iustin Pop
2483 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    # no locks needed; just validate the requested field names
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    # map each known field to a callable, so a value is computed only
    # when that field was actually requested
    getters = {
      "cluster_name": self.cfg.GetClusterName,
      "master_node": self.cfg.GetMasterNode,
      "drain_flag": lambda: os.path.exists(constants.JOB_QUEUE_DRAIN_FILE),
      }
    values = []
    for field in self.op.output_fields:
      try:
        getter = getters[field]
      except KeyError:
        raise errors.ParameterError(field)
      values.append(getter())
    return values
2521 a8083063 Iustin Pop
2522 a8083063 Iustin Pop
2523 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # the node locks are filled in at DeclareLocks time, once the
    # instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance
    _CheckNodeOnline(self, instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if disks_ok:
      return disks_info
    raise errors.OpExecError("Cannot activate block devices")
2559 a8083063 Iustin Pop
2560 a8083063 Iustin Pop
2561 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: a (disks_ok, device_info) tuple; disks_ok is False if the
      operation failed, and device_info is a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        # secondary-node failures are fatal only if the caller cares
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
    # NOTE(review): ``result`` here is the assemble call made for this disk
    # on the primary node; this assumes ComputeNodeTree always yields the
    # primary node (otherwise a stale ``result`` would be reused) -- confirm
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        result.payload))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2628 a8083063 Iustin Pop
2629 a8083063 Iustin Pop
2630 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Activate an instance's disks, aborting on consistency errors.

  Assembles all of the instance's disks; secondary-node assembly
  failures are tolerated only when ``force`` is true. On failure the
  partially-assembled disks are torn down again and an error is raised.

  """
  ok, _ = _AssembleInstanceDisks(lu, instance, ignore_secondaries=force)
  if ok:
    return
  # undo whatever was partially assembled before bailing out
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.proc.LogWarning("", hint="If the message above refers to a"
                       " secondary node,"
                       " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2643 fe7b0351 Michael Hanselmann
2644 fe7b0351 Michael Hanselmann
2645 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance; the node locks are recomputed from its node list
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    _SafeShutdownInstanceDisks(self, self.instance)
2677 a8083063 Iustin Pop
2678 a8083063 Iustin Pop
2679 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  pnode = instance.primary_node
  running = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
  # a failed RPC or a malformed payload means the node cannot be trusted
  if running.failed or not isinstance(running.data, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             pnode)

  if instance.name in running.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2698 a8083063 Iustin Pop
2699 a8083063 Iustin Pop
2700 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, failures on the primary node are logged
  but do not affect the return value; otherwise a failure on any node
  (primary included) makes the result false.

  @return: True if every block device was shut down cleanly (subject to
      ignore_primary), False otherwise

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.RemoteFailMsg()
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        # only degrade the result if this is not an ignored primary-node
        # failure
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result
2721 a8083063 Iustin Pop
2722 a8083063 Iustin Pop
2723 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Verify that a node can provide a given amount of free memory.

  Queries the node for its memory statistics and raises an
  L{errors.OpPrereqError} when the reported free memory is below the
  requested amount, or when it cannot be determined at all.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  info = lu.rpc.call_node_info([node], lu.cfg.GetVGName(),
                               hypervisor_name)[node]
  info.Raise()
  free_mem = info.data.get('memory_free')
  # a non-integer answer means the node could not report its memory
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, free_mem))
  if free_mem < requested:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, free_mem))
2755 d4f16fd9 Iustin Pop
2756 d4f16fd9 Iustin Pop
2757 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  Validates optional one-shot beparams/hvparams overrides, checks that
  the primary node is online and has enough free memory, then activates
  the disks and asks the primary node to start the instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra beparams; optional per-start override of backend parameters
    self.beparams = getattr(self.op, "beparams", {})
    if self.beparams:
      if not isinstance(self.beparams, dict):
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                                   " dict" % (type(self.beparams), ))
      # fill the beparams dict
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
      self.op.beparams = self.beparams

    # extra hvparams; optional per-start override of hypervisor parameters
    self.hvparams = getattr(self.op, "hvparams", {})
    if self.hvparams:
      if not isinstance(self.hvparams, dict):
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
                                   " dict" % (type(self.hvparams), ))

      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
      # validate the full resulting set: cluster defaults, then instance
      # values, then the per-start overrides on top
      filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                    instance.hvparams)
      filled_hvp.update(self.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
      self.op.hvparams = self.hvparams

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existance
    _CheckInstanceBridgesExist(self, instance)

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    # only check for free memory if the instance is not already running
    if not remote_info.data:
      _CheckNodeFreeMemory(self, instance.primary_node,
                           "starting instance %s" % instance.name,
                           bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    # mark the instance up before starting it; NOTE(review): presumably
    # deliberate so the config reflects the desired state even if the
    # start RPC fails below -- confirm against the watcher's behavior
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance,
                                          self.hvparams, self.beparams)
    msg = result.RemoteFailMsg()
    if msg:
      # roll back the disk activation before reporting the failure
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)
2854 a8083063 Iustin Pop
2855 a8083063 Iustin Pop
2856 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  Supports three reboot types: soft and hard reboots are delegated to
  the hypervisor on the primary node; a full reboot is implemented as a
  shutdown followed by a disk deactivate/activate cycle and a fresh
  start.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    # validate the reboot type before acquiring any locks
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existance
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboots are handled by the hypervisor on the primary
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not reboot instance: %s" % msg)
    else:
      # full reboot: stop the instance, cycle the disks, start again
      result = self.rpc.call_instance_shutdown(node_current, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance for"
                                 " full reboot: %s" % msg)
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        # don't leave the disks active if the start failed
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)
2940 bf6929a2 Alexander Schreiber
2941 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  Marks the instance as administratively down, asks the primary node to
  stop it (logging, not failing, on error) and then deactivates its
  disks on all nodes.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    hook_env = _BuildInstanceHookEnvByObject(self, self.instance)
    node_list = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return hook_env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    # record the desired state first, then try to enforce it
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      # best effort: warn but still proceed to deactivate the disks
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)

    _ShutdownInstanceDisks(self, instance)
2987 a8083063 Iustin Pop
2988 a8083063 Iustin Pop
2989 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  Re-runs the OS create scripts for a stopped instance, optionally
  switching it to a different OS first.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked down, make sure it is not actually running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: the original formatted this message with self.op.pnode,
        # which is not an attribute of the reinstall opcode and would
        # itself raise; use the instance's primary node name instead
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    # the OS scripts need the disks to be active; always deactivate them
    # again afterwards, even on failure
    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      _ShutdownInstanceDisks(self, inst)
3076 fe7b0351 Michael Hanselmann
3077 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  Renames a stopped instance in the configuration, moves file-based
  disk storage if needed, and runs the OS rename script on the primary
  node.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and is not already taken.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # even if marked down, make sure it is not actually running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    # unless told otherwise, refuse a new name whose IP is already live
    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    # remember the old storage dir before the config rename changes it
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    # run the OS rename script; a failure here is only a warning since
    # the configuration rename has already happened
    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
3187 decd5f45 Iustin Pop
3188 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    # Lock the instance now; its node locks are filled in later by
    # DeclareLocks once the instance's node list is known.
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    shutdown_result = self.rpc.call_instance_shutdown(instance.primary_node,
                                                      instance)
    msg = shutdown_result.RemoteFailMsg()
    if msg:
      # shutdown failure is fatal unless the caller asked us to ignore it
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))
      feedback_fn("Warning: can't shutdown instance: %s" % msg)

    logging.info("Removing block devices for instance %s", instance.name)

    if not _RemoveDisks(self, instance):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", instance.name)

    self.cfg.RemoveInstance(instance.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
3256 a8083063 Iustin Pop
3257 a8083063 Iustin Pop
3258 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")

  def ExpandNames(self):
    # validate the requested fields before acquiring anything
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed when we must ask nodes for live data
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # no explicit names from the caller, so any (nice) ordering will do
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # the caller gave explicit names, so keep their ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset(inst.primary_node for inst in instance_list)
    hv_list = list(set(inst.hypervisor for inst in instance_list))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        elif result.data:
          live_data.update(result.data)
        # else no instance is alive
    else:
      live_data = dict((name, {}) for name in instance_names)

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      row = []
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        static_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          value = instance.name
        elif field == "os":
          value = instance.os
        elif field == "pnode":
          value = instance.primary_node
        elif field == "snodes":
          value = list(instance.secondary_nodes)
        elif field == "admin_state":
          value = instance.admin_up
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            value = None
          else:
            value = bool(live_data.get(instance.name))
        elif field == "status":
          if instance.primary_node in off_nodes:
            value = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            value = "ERROR_nodedown"
          else:
            is_running = bool(live_data.get(instance.name))
            if is_running and instance.admin_up:
              value = "running"
            elif is_running:
              value = "ERROR_up"
            elif instance.admin_up:
              value = "ERROR_down"
            else:
              value = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            value = None
          elif instance.name in live_data:
            value = live_data[instance.name].get("memory", "?")
          else:
            value = "-"
        elif field == "disk_template":
          value = instance.disk_template
        elif field == "ip":
          if not instance.nics:
            value = None
          else:
            value = instance.nics[0].ip
        elif field == "bridge":
          if not instance.nics:
            value = None
          else:
            value = instance.nics[0].bridge
        elif field == "mac":
          if not instance.nics:
            value = None
          else:
            value = instance.nics[0].mac
        elif field in ("sda_size", "sdb_size"):
          # legacy two-disk names: map 'a'/'b' to disk index 0/1
          idx = ord(field[2]) - ord('a')
          try:
            value = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            value = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          value = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          value = list(instance.GetTags())
        elif field == "serial_no":
          value = instance.serial_no
        elif field == "network_port":
          value = instance.network_port
        elif field == "hypervisor":
          value = instance.hypervisor
        elif field == "hvparams":
          value = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          value = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          value = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          value = i_be.get(field[len(BEPREFIX):], None)
        elif static_match and static_match.groups():
          # the field matched one of the parameterized (regex) static fields
          groups = static_match.groups()
          if groups[0] == "disk":
            if groups[1] == "count":
              value = len(instance.disks)
            elif groups[1] == "sizes":
              value = [disk.size for disk in instance.disks]
            elif groups[1] == "size":
              try:
                value = instance.FindDisk(groups[2]).size
              except errors.OpPrereqError:
                value = None
            else:
              assert False, "Unhandled disk parameter"
          elif groups[0] == "nic":
            if groups[1] == "count":
              value = len(instance.nics)
            elif groups[1] == "macs":
              value = [nic.mac for nic in instance.nics]
            elif groups[1] == "ips":
              value = [nic.ip for nic in instance.nics]
            elif groups[1] == "bridges":
              value = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(groups[2])
              if nic_idx >= len(instance.nics):
                value = None
              elif groups[1] == "mac":
                value = instance.nics[nic_idx].mac
              elif groups[1] == "ip":
                value = instance.nics[nic_idx].ip
              elif groups[1] == "bridge":
                value = instance.nics[nic_idx].bridge
              else:
                assert False, "Unhandled NIC parameter"
          else:
            assert False, "Unhandled variable parameter"
        else:
          raise errors.ParameterError(field)
        row.append(value)
      output.append(row)

    return output
3503 a8083063 Iustin Pop
3504 a8083063 Iustin Pop
3505 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    # Lock the instance; the node locks are recalculated after
    # DeclareLocks knows the instance's node list.
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # only network-mirrored templates can fail over to the secondary
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)

    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for disk in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, disk, target_node, False):
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % disk.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      if not self.op.ignore_consistency:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))
      self.proc.LogWarning("Could not shutdown instance %s on node %s."
                           " Proceeding anyway. Please make sure node"
                           " %s is down. Error details: %s",
                           instance.name, source_node, source_node, msg)

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
3642 a8083063 Iustin Pop
3643 a8083063 Iustin Pop
3644 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  Only DRBD8-based instances are supported (see CheckPrereq); the
  migration target is always the instance's (single) secondary node.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters: instance name, live-migration flag and
  # whether this is a cleanup of a previously failed migration
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    # node locks are computed later (LOCKS_REPLACE), once the instance
    # lock is held and its node list is known
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["MIGRATE_LIVE"] = self.op.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses the drbd8
    disk template, has a secondary node with enough free memory, that
    the target bridges exist, and (unless cleaning up) that the
    hypervisor reports the instance as migratable.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      # drbd8 instances must always have exactly one secondary; a
      # missing one means the configuration is corrupted
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      # a drained node may not receive new instances; this check is
      # skipped for cleanup, which only repairs an earlier migration
      _CheckNodeNotDrained(self, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    Loops (sleeping 2 seconds between rounds) until every node reports
    its disks as fully synchronized; the progress shown is the minimum
    (i.e. slowest node's) percentage.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        # payload is a (done, sync_percent) pair per node
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    Closes the instance's block devices on the given node, which puts
    the DRBD devices there into secondary role.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))

  def _GoStandalone(self):
    """Disconnect from the network.

    Puts the DRBD devices on both nodes into standalone (disconnected)
    mode; a later _GoReconnect re-attaches them.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks node %s,"
                                 " error %s" % (node, msg))

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    @type multimaster: boolean
    @param multimaster: if True, attach the disks in dual-master mode
        (needed while the live migration runs); otherwise single-master

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, msg))

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    # running on both or on neither node is ambiguous and needs manual
    # intervention, so refuse to continue in either case
    if runningon_source and runningon_target:
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore here errors, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    Best-effort: errors are logged as a warning instead of raised,
    since this already runs on an error path.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    Invokes finalize_migration on the target node with success=False,
    telling it to discard the half-migrated instance.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we stil have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      # pre-migration failed: abort on the target and put the disks
      # back into single-master mode before giving up
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    # NOTE(review): fixed 10s pauses around the migrate call; the
    # rationale is not evident from this code - presumably a settle
    # delay for the hypervisor, confirm before changing
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    # demote the old primary and go back to single-master mode
    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    Computes the node/IP working set used by all helper methods, then
    dispatches to either the cleanup or the actual migration path.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    # DRBD traffic goes over the secondary IPs
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()
4015 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
4016 428958aa Iustin Pop
                    info, force_open):
4017 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
4018 a8083063 Iustin Pop

4019 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
4020 a8083063 Iustin Pop
  all its children.
4021 a8083063 Iustin Pop

4022 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
4023 a8083063 Iustin Pop

4024 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
4025 428958aa Iustin Pop
  @param node: the node on which to create the device
4026 428958aa Iustin Pop
  @type instance: L{objects.Instance}
4027 428958aa Iustin Pop
  @param instance: the instance which owns the device
4028 428958aa Iustin Pop
  @type device: L{objects.Disk}
4029 428958aa Iustin Pop
  @param device: the device to create
4030 428958aa Iustin Pop
  @type force_create: boolean
4031 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
4032 428958aa Iustin Pop
      will be change to True whenever we find a device which has
4033 428958aa Iustin Pop
      CreateOnSecondary() attribute
4034 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4035 428958aa Iustin Pop
      (this will be represented as a LVM tag)
4036 428958aa Iustin Pop
  @type force_open: boolean
4037 428958aa Iustin Pop
  @param force_open: this parameter will be passes to the
4038 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4039 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
4040 428958aa Iustin Pop
      the child assembly and the device own Open() execution
4041 428958aa Iustin Pop

4042 a8083063 Iustin Pop
  """
4043 a8083063 Iustin Pop
  if device.CreateOnSecondary():
4044 428958aa Iustin Pop
    force_create = True
4045 796cab27 Iustin Pop
4046 a8083063 Iustin Pop
  if device.children:
4047 a8083063 Iustin Pop
    for child in device.children:
4048 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
4049 428958aa Iustin Pop
                      info, force_open)
4050 a8083063 Iustin Pop
4051 428958aa Iustin Pop
  if not force_create:
4052 796cab27 Iustin Pop
    return
4053 796cab27 Iustin Pop
4054 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
4055 de12473a Iustin Pop
4056 de12473a Iustin Pop
4057 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
4058 de12473a Iustin Pop
  """Create a single block device on a given node.
4059 de12473a Iustin Pop

4060 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
4061 de12473a Iustin Pop
  created in advance.
4062 de12473a Iustin Pop

4063 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
4064 de12473a Iustin Pop
  @param node: the node on which to create the device
4065 de12473a Iustin Pop
  @type instance: L{objects.Instance}
4066 de12473a Iustin Pop
  @param instance: the instance which owns the device
4067 de12473a Iustin Pop
  @type device: L{objects.Disk}
4068 de12473a Iustin Pop
  @param device: the device to create
4069 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4070 de12473a Iustin Pop
      (this will be represented as a LVM tag)
4071 de12473a Iustin Pop
  @type force_open: boolean
4072 de12473a Iustin Pop
  @param force_open: this parameter will be passes to the
4073 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4074 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
4075 de12473a Iustin Pop
      the child assembly and the device own Open() execution
4076 de12473a Iustin Pop

4077 de12473a Iustin Pop
  """
4078 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
4079 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
4080 428958aa Iustin Pop
                                       instance.name, force_open, info)
4081 7d81697f Iustin Pop
  msg = result.RemoteFailMsg()
4082 7d81697f Iustin Pop
  if msg:
4083 428958aa Iustin Pop
    raise errors.OpExecError("Can't create block device %s on"
4084 7d81697f Iustin Pop
                             " node %s for instance %s: %s" %
4085 7d81697f Iustin Pop
                             (device, node, instance.name, msg))
4086 a8083063 Iustin Pop
  if device.physical_id is None:
4087 0959c824 Iustin Pop
    device.physical_id = result.payload
4088 a8083063 Iustin Pop
4089 a8083063 Iustin Pop
4090 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
4091 923b1523 Iustin Pop
  """Generate a suitable LV name.
4092 923b1523 Iustin Pop

4093 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
4094 923b1523 Iustin Pop

4095 923b1523 Iustin Pop
  """
4096 923b1523 Iustin Pop
  results = []
4097 923b1523 Iustin Pop
  for val in exts:
4098 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
4099 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
4100 923b1523 Iustin Pop
  return results
4101 923b1523 Iustin Pop
4102 923b1523 Iustin Pop
4103 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  The result is one DRBD8 disk object with two LV children: the data
  volume (named names[0]) and the metadata volume (named names[1]).

  """
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  # the metadata volume always uses a fixed size of 128
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor,
                                  shared_secret),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
4122 a1f445d3 Iustin Pop
4123 7c0d6283 Michael Hanselmann
4124 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  @param lu: the lu on whose behalf we execute
  @param template_name: one of the constants.DT_* disk templates
  @param instance_name: name of the owning instance (used for DRBD
      minor allocation)
  @param primary_node: the instance's primary node
  @param secondary_nodes: list of secondary nodes (must be empty for
      plain/file templates, exactly one node for drbd8)
  @param disk_info: list of dicts with "size" and "mode" per disk
  @param file_storage_dir: directory for file-based disks
  @param file_driver: driver for file-based disks
  @param base_index: offset added to each disk's index when building
      its iv_name ("disk/N")
  @return: list of disk objects

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []

  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disks.append(objects.Disk(dev_type=constants.LD_LV,
                                size=disk["size"],
                                logical_id=(vgname, names[idx]),
                                iv_name="disk/%d" % disk_index,
                                mode=disk["mode"]))
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # each disk needs one minor on the primary and one on the secondary
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * disk_count, instance_name)

    # two LV names (data + meta) per disk, kept in one flat list
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      drbd_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      drbd_dev.mode = disk["mode"]
      disks.append(drbd_dev)
  elif template_name == constants.DT_FILE:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disks.append(objects.Disk(dev_type=constants.LD_FILE,
                                size=disk["size"],
                                iv_name="disk/%d" % disk_index,
                                logical_id=(file_driver,
                                            "%s/disk%d" % (file_storage_dir,
                                                           disk_index)),
                                mode=disk["mode"]))
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
4188 a8083063 Iustin Pop
4189 a8083063 Iustin Pop
4190 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4191 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4192 3ecf6786 Iustin Pop

4193 3ecf6786 Iustin Pop
  """
4194 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
4195 a0c3fea1 Michael Hanselmann
4196 a0c3fea1 Michael Hanselmann
4197 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @raise errors.OpExecError: if the file storage directory cannot be
      created on the primary node

  """
  disk_info_text = _GetInstanceInfoText(instance)
  primary = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    # all file-based disks of an instance share one directory; it must
    # exist on the primary node before the devices are created
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(primary, file_storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % primary)

    if not result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               file_storage_dir)

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for disk in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 disk.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      # only on the primary node do we force-create the device
      is_primary = node == primary
      _CreateBlockDev(lu, node, instance, disk, is_primary,
                      disk_info_text, is_primary)
4233 a8083063 Iustin Pop
4234 a8083063 Iustin Pop
4235 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  success = True
  for disk in instance.disks:
    # walk the whole device tree (e.g. DRBD on top of LVs) on every node
    for node, sub_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(sub_disk, node)
      fail_msg = lu.rpc.call_blockdev_remove(node, sub_disk).RemoteFailMsg()
      if fail_msg:
        # best effort: warn and keep going so other devices still get removed
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", disk.iv_name, node, fail_msg)
        success = False

  if instance.disk_template == constants.DT_FILE:
    # file-based instances also own a storage directory on the primary
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                 file_storage_dir)
    if result.failed or not result.data:
      logging.error("Could not remove directory '%s'", file_storage_dir)
      success = False

  return success
4272 a8083063 Iustin Pop
4273 a8083063 Iustin Pop
4274 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  @type disk_template: string
  @param disk_template: one of the C{constants.DT_*} disk templates
  @type disks: list of dicts
  @param disks: disk specifications, each with a "size" key (in MB)
  @rtype: int or None
  @return: required free space in the volume group, or None for
      templates that do not consume LVM space
  @raise errors.ProgrammerError: for an unknown disk template

  """
  # Required free disk space as a function of disk and swap space
  if disk_template in (constants.DT_DISKLESS, constants.DT_FILE):
    # these templates do not use the volume group at all
    return None
  if disk_template == constants.DT_PLAIN:
    return sum(d["size"] for d in disks)
  if disk_template == constants.DT_DRBD8:
    # 128 MB are added for drbd metadata for each disk
    return sum(d["size"] + 128 for d in disks)

  raise errors.ProgrammerError("Disk template '%s' size requirement"
                               " is unknown" %  disk_template)
4292 e2fe6369 Iustin Pop
4293 e2fe6369 Iustin Pop
4294 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstract the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  results = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                   hvname,
                                                   hvparams)
  for node_name in nodenames:
    node_result = results[node_name]
    if node_result.offline:
      # offline nodes cannot be queried, so they get a free pass
      continue
    fail_msg = node_result.RemoteFailMsg()
    if fail_msg:
      raise errors.OpPrereqError("Hypervisor parameter validation"
                                 " failed on node %s: %s" %
                                 (node_name, fail_msg))
4322 74409b12 Iustin Pop
4323 74409b12 Iustin Pop
4324 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Handles both fresh creation (INSTANCE_CREATE) and import from an
  export (INSTANCE_IMPORT); see the C{mode} opcode parameter.

  """
  # hooks directory name for this LU
  HPATH = "instance-add"
  # hooks object type
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters that must be present (others are defaulted to
  # None in ExpandNames)
  _OP_REQP = ["instance_name", "disks", "disk_template",
              "mode", "start",
              "wait_for_sync", "ip_check", "nics",
              "hvparams", "beparams"]
  # this LU computes its own fine-grained locks, no big ganeti lock needed
  REQ_BGL = False
4335 7baf741d Guido Trotter
4336 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Expands and checks one node name.

    @type node: string
    @param node: the (possibly short) node name to expand
    @rtype: string
    @return: the full node name
    @raise errors.OpPrereqError: if the name matches no known node

    """
    expanded = self.cfg.ExpandNodeName(node)
    if expanded is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return expanded
4344 7baf741d Guido Trotter
4345 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation: syntax-check the
    static opcode parameters (mode, disk template, hypervisor and
    backend params, nics, disks), pre-build the nic/disk descriptions,
    and declare the instance and node locks needed.

    @raise errors.OpPrereqError: for any invalid opcode parameter

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.hypervisor is None:
      # default to the cluster-wide hypervisor
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    # remember the filled hypervisor params for BuildHooksEnv
    self.hv_full = filled_hvp

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        # 'auto' means use the instance's own resolved address
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      bridge = nic.get("bridge", None)
      if bridge is None:
        bridge = self.cfg.GetDefBridge()
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        self.op.file_driver not in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may pick any node, so lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # a relative path with no source node means CheckPrereq will
        # search all nodes for the export, hence the all-nodes lock
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          # relative paths are anchored under the cluster export directory
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
4502 a8083063 Iustin Pop
4503 538475ca Iustin Pop
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success, fills in self.op.pnode (and self.op.snode when the
    allocator was asked for two nodes) from the allocator's answer.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    nic_dicts = [nic.ToDict() for nic in self.nics]
    allocator = IAllocator(self,
                           mode=constants.IALLOCATOR_MODE_ALLOC,
                           name=self.op.instance_name,
                           disk_template=self.op.disk_template,
                           tags=[],
                           os=self.op.os_type,
                           vcpus=self.be_full[constants.BE_VCPUS],
                           mem_size=self.be_full[constants.BE_MEMORY],
                           disks=self.disks,
                           nics=nic_dicts,
                           hypervisor=self.op.hypervisor,
                           )

    allocator.Run(self.op.iallocator)

    if not allocator.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, allocator.info))
    if len(allocator.nodes) != allocator.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(allocator.nodes),
                                  allocator.required_nodes))
    self.op.pnode = allocator.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(allocator.nodes))
    if allocator.required_nodes == 2:
      # second returned node becomes the mirror (secondary) node
      self.op.snode = allocator.nodes[1]
4538 538475ca Iustin Pop
4539 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    @rtype: tuple
    @return: (env dict, pre-hook node list, post-hook node list)

    """
    env = {"ADD_MODE": self.op.mode}
    if self.op.mode == constants.INSTANCE_IMPORT:
      # only imports have a source to describe
      env.update({
        "SRC_NODE": self.op.src_node,
        "SRC_PATH": self.op.src_path,
        "SRC_IMAGES": self.src_images,
        })

    nic_specs = [(nic.ip, nic.bridge, nic.mac) for nic in self.nics]
    disk_specs = [(disk["size"], disk["mode"]) for disk in self.disks]
    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=nic_specs,
      disk_template=self.op.disk_template,
      disks=disk_specs,
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor=self.op.hypervisor,
    ))

    # hooks run on the master plus all of the instance's nodes
    node_list = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return env, node_list, node_list
4572 a8083063 Iustin Pop
4573 a8083063 Iustin Pop
4574 a8083063 Iustin Pop
  def CheckPrereq(self):
4575 a8083063 Iustin Pop
    """Check prerequisites.
4576 a8083063 Iustin Pop

4577 a8083063 Iustin Pop
    """
4578 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
4579 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
4580 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
4581 eedc99de Manuel Franceschini
                                 " instances")
4582 eedc99de Manuel Franceschini
4583 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
4584 7baf741d Guido Trotter
      src_node = self.op.src_node
4585 7baf741d Guido Trotter
      src_path = self.op.src_path
4586 a8083063 Iustin Pop
4587 c0cbdc67 Guido Trotter
      if src_node is None:
4588 c0cbdc67 Guido Trotter
        exp_list = self.rpc.call_export_list(
4589 781de953 Iustin Pop
          self.acquired_locks[locking.LEVEL_NODE])
4590 c0cbdc67 Guido Trotter
        found = False
4591 c0cbdc67 Guido Trotter
        for node in exp_list:
4592 781de953 Iustin Pop
          if not exp_list[node].failed and src_path in exp_list[node].data:
4593 c0cbdc67 Guido Trotter
            found = True
4594 c0cbdc67 Guido Trotter
            self.op.src_node = src_node = node
4595 c0cbdc67 Guido Trotter
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
4596 c0cbdc67 Guido Trotter
                                                       src_path)
4597 c0cbdc67 Guido Trotter
            break
4598 c0cbdc67 Guido Trotter
        if not found:
4599 c0cbdc67 Guido Trotter
          raise errors.OpPrereqError("No export found for relative path %s" %
4600 c0cbdc67 Guido Trotter
                                      src_path)
4601 c0cbdc67 Guido Trotter
4602 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, src_node)
4603 781de953 Iustin Pop
      result = self.rpc.call_export_info(src_node, src_path)
4604 781de953 Iustin Pop
      result.Raise()
4605 781de953 Iustin Pop
      if not result.data:
4606 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
4607 a8083063 Iustin Pop
4608 781de953 Iustin Pop
      export_info = result.data
4609 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
4610 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
4611 a8083063 Iustin Pop
4612 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
4613 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
4614 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
4615 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
4616 a8083063 Iustin Pop
4617 09acf207 Guido Trotter
      # Check that the new instance doesn't have less disks than the export
4618 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
4619 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
4620 09acf207 Guido Trotter
      if instance_disks < export_disks:
4621 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
4622 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
4623 726d7d68 Iustin Pop
                                   (instance_disks, export_disks))
4624 a8083063 Iustin Pop
4625 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
4626 09acf207 Guido Trotter
      disk_images = []
4627 09acf207 Guido Trotter
      for idx in range(export_disks):
4628 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
4629 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
4630 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
4631 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
4632 09acf207 Guido Trotter
          image = os.path.join(src_path, export_name)
4633 09acf207 Guido Trotter
          disk_images.append(image)
4634 09acf207 Guido Trotter
        else:
4635 09acf207 Guido Trotter
          disk_images.append(False)
4636 09acf207 Guido Trotter
4637 09acf207 Guido Trotter
      self.src_images = disk_images
4638 901a65c1 Iustin Pop
4639 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
4640 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
4641 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
4642 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
4643 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
4644 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
4645 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
4646 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
4647 bc89efc3 Guido Trotter
4648 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
4649 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
4650 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
4651 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
4652 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
4653 901a65c1 Iustin Pop
4654 901a65c1 Iustin Pop
    if self.op.ip_check:
4655 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
4656 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4657 7b3a8fb5 Iustin Pop
                                   (self.check_ip, self.op.instance_name))
4658 901a65c1 Iustin Pop
4659 295728df Guido Trotter
    #### mac address generation
4660 295728df Guido Trotter
    # By generating here the mac address both the allocator and the hooks get
4661 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
4662 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
4663 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
4664 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
4665 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
4666 295728df Guido Trotter
    # creation job will fail.
4667 295728df Guido Trotter
    for nic in self.nics:
4668 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4669 295728df Guido Trotter
        nic.mac = self.cfg.GenerateMAC()
4670 295728df Guido Trotter
4671 538475ca Iustin Pop
    #### allocator run
4672 538475ca Iustin Pop
4673 538475ca Iustin Pop
    if self.op.iallocator is not None:
4674 538475ca Iustin Pop
      self._RunAllocator()
4675 0f1a06e3 Manuel Franceschini
4676 901a65c1 Iustin Pop
    #### node related checks
4677 901a65c1 Iustin Pop
4678 901a65c1 Iustin Pop
    # check primary node
4679 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
4680 7baf741d Guido Trotter
    assert self.pnode is not None, \
4681 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
4682 7527a8a4 Iustin Pop
    if pnode.offline:
4683 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
4684 7527a8a4 Iustin Pop
                                 pnode.name)
4685 733a2b6a Iustin Pop
    if pnode.drained:
4686 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
4687 733a2b6a Iustin Pop
                                 pnode.name)
4688 7527a8a4 Iustin Pop
4689 901a65c1 Iustin Pop
    self.secondaries = []
4690 901a65c1 Iustin Pop
4691 901a65c1 Iustin Pop
    # mirror node verification
4692 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
4693 7baf741d Guido Trotter
      if self.op.snode is None:
4694 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
4695 3ecf6786 Iustin Pop
                                   " a mirror node")
4696 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
4697 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
4698 3ecf6786 Iustin Pop
                                   " the primary node.")
4699 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
4700 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
4701 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
4702 a8083063 Iustin Pop
4703 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
4704 6785674e Iustin Pop
4705 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
4706 08db7c5c Iustin Pop
                                self.disks)
4707 ed1ebc60 Guido Trotter
4708 8d75db10 Iustin Pop
    # Check lv size requirements
4709 8d75db10 Iustin Pop
    if req_size is not None:
4710 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4711 72737a7f Iustin Pop
                                         self.op.hypervisor)
4712 8d75db10 Iustin Pop
      for node in nodenames:
4713 781de953 Iustin Pop
        info = nodeinfo[node]
4714 781de953 Iustin Pop
        info.Raise()
4715 781de953 Iustin Pop
        info = info.data
4716 8d75db10 Iustin Pop
        if not info:
4717 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Cannot get current information"
4718 3e91897b Iustin Pop
                                     " from node '%s'" % node)
4719 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
4720 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
4721 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
4722 8d75db10 Iustin Pop
                                     " node %s" % node)
4723 8d75db10 Iustin Pop
        if req_size > info['vg_free']:
4724 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
4725 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
4726 8d75db10 Iustin Pop
                                     (node, info['vg_free'], req_size))
4727 ed1ebc60 Guido Trotter
4728 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
4729 6785674e Iustin Pop
4730 a8083063 Iustin Pop
    # os verification
4731 781de953 Iustin Pop
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
4732 781de953 Iustin Pop
    result.Raise()
4733 6dfad215 Iustin Pop
    if not isinstance(result.data, objects.OS) or not result.data:
4734 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
4735 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
4736 a8083063 Iustin Pop
4737 901a65c1 Iustin Pop
    # bridge check on primary node
4738 08db7c5c Iustin Pop
    bridges = [n.bridge for n in self.nics]
4739 781de953 Iustin Pop
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
4740 781de953 Iustin Pop
    result.Raise()
4741 781de953 Iustin Pop
    if not result.data:
4742 781de953 Iustin Pop
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
4743 781de953 Iustin Pop
                                 " exist on destination node '%s'" %
4744 08db7c5c Iustin Pop
                                 (",".join(bridges), pnode.name))
4745 a8083063 Iustin Pop
4746 49ce1563 Iustin Pop
    # memory check on primary node
4747 49ce1563 Iustin Pop
    if self.op.start:
4748 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
4749 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
4750 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
4751 338e51e8 Iustin Pop
                           self.op.hypervisor)
4752 49ce1563 Iustin Pop
4753 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Builds the disk layout and the configuration object for the new
    instance, creates the disks on the nodes, registers the instance in
    the cluster configuration, releases the node locks that are no
    longer needed, optionally waits for the disks to sync, runs the OS
    create/import scripts and finally (if requested) starts the
    instance.

    @param feedback_fn: callable used to report progress to the caller

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # hypervisors listed in HTS_REQ_PORT need a network port (allocated
    # from the cluster-wide pool); all others get no port
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    # the instance is created stopped (admin_up=False); it is only
    # marked as up further down, right before it is actually started
    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      # roll back any disks created so far, release the reserved DRBD
      # minors in all cases, and re-raise the original error
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      # keep the lock on the source node, which is still needed for the
      # OS import below; release all the other node locks
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo everything done so far and fail the job
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
        msg = result.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Could not add os for instance %s"
                                   " on node %s: %s" %
                                   (instance, pnode_name, msg))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        import_result.Raise()
        # a failed per-disk import is only logged as a warning, not fatal
        for idx, result in enumerate(import_result.data):
          if not result:
            self.LogWarning("Could not import the image %s for instance"
                            " %s, disk %d, on node %s" %
                            (src_images[idx], instance, idx, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      iobj.admin_up = True
      self.cfg.Update(iobj)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not start instance: %s" % msg)
4890 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the opcode names and acquire the instance lock."""
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # ask the primary node which instances it currently runs
    running = self.rpc.call_instance_list([pnode], [inst.hypervisor])[pnode]
    running.Raise()

    if inst.name not in running.data:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logging.debug("Connecting to console of %s on %s", inst.name, pnode)

    hyper = hypervisor.GetHypervisor(inst.hypervisor)
    cluster = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    console_cmd = hyper.GetShellCommandForConsole(inst,
                                                  cluster.FillHV(inst),
                                                  cluster.FillBE(inst))

    # build ssh cmdline
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
4943 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  # hooks path/type for this LU
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters: instance name, replacement mode, disk list
  _OP_REQP = ["instance_name", "mode", "disks"]
  # this LU manages its own locking (see ExpandNames/DeclareLocks below)
  REQ_BGL = False
4952 7e9366f7 Iustin Pop
  def CheckArguments(self):
    """Check and normalize the opcode arguments.

    Defaults the optional C{remote_node} and C{iallocator} attributes to
    None, then validates the combination: when changing the secondary
    node exactly one of them must be given, otherwise neither may be.

    @raise errors.OpPrereqError: if the parameter combination is invalid

    """
    # default the optional parameters via getattr instead of the
    # hasattr-then-assign (LBYL) pattern
    self.op.remote_node = getattr(self.op, "remote_node", None)
    self.op.iallocator = getattr(self.op, "iallocator", None)

    # check for valid parameter combination
    cnt = [self.op.remote_node, self.op.iallocator].count(None)
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if cnt == 2:
        # neither given
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")
      elif cnt == 0:
        # both given
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
    else: # not replacing the secondary
      if cnt != 2:
        raise errors.OpPrereqError("The iallocator and new node options can"
                                   " be used only when changing the"
                                   " secondary node")
4974 7e9366f7 Iustin Pop
  def ExpandNames(self):
    """Expand names and compute the node locking strategy."""
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      # the allocator may pick any node, so we must lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      return

    if self.op.remote_node is not None:
      expanded = self.cfg.ExpandNodeName(self.op.remote_node)
      if expanded is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = expanded
      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [expanded]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
4995 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Declare the instance's nodes at the node locking level."""
    if level != locking.LEVEL_NODE:
      return
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
      self._LockInstancesNodes()
5002 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    Runs the configured iallocator script in relocation mode and stores
    the selected node in C{self.op.remote_node}.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # the format string takes three arguments; the previous code passed
      # only two, so this raise itself died with a TypeError
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)
5025 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.instance
    # mode/secondary info first, then the generic per-instance variables
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, instance))
    # hooks run on the master and the primary, plus the new secondary if any
    nl = [self.cfg.GetMasterNode(), instance.primary_node]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return env, nl, nl
5045 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, is DRBD8-based,
    validates the replacement node, and computes the target/other/new
    node attributes used by the Exec phase.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # disk replacement is only implemented for DRBD8-based instances
    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # when an iallocator is given, it fills in self.op.remote_node
    if self.op.iallocator is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    # compute the target node (where replacement storage is created),
    # the other node (checked for consistency) and, when changing the
    # secondary, the new node; n1/n2 are only used for the online checks
    if self.op.mode == constants.REPLACE_DISK_PRI:
      n1 = self.tgt_node = instance.primary_node
      n2 = self.oth_node = self.sec_node
    elif self.op.mode == constants.REPLACE_DISK_SEC:
      n1 = self.tgt_node = self.sec_node
      n2 = self.oth_node = instance.primary_node
    elif self.op.mode == constants.REPLACE_DISK_CHG:
      n1 = self.new_node = remote_node
      n2 = self.oth_node = instance.primary_node
      self.tgt_node = self.sec_node
      _CheckNodeNotDrained(self, remote_node)
    else:
      raise errors.ProgrammerError("Unhandled disk replace mode")

    _CheckNodeOnline(self, n1)
    _CheckNodeOnline(self, n2)

    # an empty disk list means "replace all of the instance's disks"
    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    # FindDisk raises if any of the requested disk indices is invalid
    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)
5107 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
5108 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
5109 a9e0c397 Iustin Pop

5110 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5111 e4376078 Iustin Pop

5112 e4376078 Iustin Pop
      1. for each disk to be replaced:
5113 e4376078 Iustin Pop

5114 e4376078 Iustin Pop
        1. create new LVs on the target node with unique names
5115 e4376078 Iustin Pop
        1. detach old LVs from the drbd device
5116 e4376078 Iustin Pop
        1. rename old LVs to name_replaced.<time_t>
5117 e4376078 Iustin Pop
        1. rename new LVs to old LVs
5118 e4376078 Iustin Pop
        1. attach the new LVs (with the old names now) to the drbd device
5119 e4376078 Iustin Pop

5120 e4376078 Iustin Pop
      1. wait for sync across all devices
5121 e4376078 Iustin Pop

5122 e4376078 Iustin Pop
      1. for each modified disk:
5123 e4376078 Iustin Pop

5124 e4376078 Iustin Pop
        1. remove old LVs (which have the name name_replaces.<time_t>)
5125 a9e0c397 Iustin Pop

5126 a9e0c397 Iustin Pop
    Failures are not very well handled.
5127 cff90b79 Iustin Pop

5128 a9e0c397 Iustin Pop
    """
5129 cff90b79 Iustin Pop
    steps_total = 6
5130 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5131 a9e0c397 Iustin Pop
    instance = self.instance
5132 a9e0c397 Iustin Pop
    iv_names = {}
5133 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
5134 a9e0c397 Iustin Pop
    # start of work
5135 a9e0c397 Iustin Pop
    cfg = self.cfg
5136 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
5137 cff90b79 Iustin Pop
    oth_node = self.oth_node
5138 cff90b79 Iustin Pop
5139 cff90b79 Iustin Pop
    # Step: check device activation
5140 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5141 cff90b79 Iustin Pop
    info("checking volume groups")
5142 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
5143 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([oth_node, tgt_node])
5144 cff90b79 Iustin Pop
    if not results:
5145 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
5146 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
5147 781de953 Iustin Pop
      res = results[node]
5148 781de953 Iustin Pop
      if res.failed or not res.data or my_vg not in res.data:
5149 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5150 cff90b79 Iustin Pop
                                 (my_vg, node))
5151 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5152 54155f52 Iustin Pop
      if idx not in self.op.disks:
5153 cff90b79 Iustin Pop
        continue
5154 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
5155 54155f52 Iustin Pop
        info("checking disk/%d on %s" % (idx, node))
5156 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
5157 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
5158 23829f6f Iustin Pop
        msg = result.RemoteFailMsg()
5159 23829f6f Iustin Pop
        if not msg and not result.payload:
5160 23829f6f Iustin Pop
          msg = "disk not found"
5161 23829f6f Iustin Pop
        if msg:
5162 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5163 23829f6f Iustin Pop
                                   (idx, node, msg))
5164 cff90b79 Iustin Pop
5165 cff90b79 Iustin Pop
    # Step: check other node consistency
5166 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5167 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5168 54155f52 Iustin Pop
      if idx not in self.op.disks:
5169 cff90b79 Iustin Pop
        continue
5170 54155f52 Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, oth_node))
5171 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, oth_node,
5172 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
5173 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
5174 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
5175 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
5176 cff90b79 Iustin Pop
5177 cff90b79 Iustin Pop
    # Step: create new storage
5178 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5179 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5180 54155f52 Iustin Pop
      if idx not in self.op.disks:
5181 a9e0c397 Iustin Pop
        continue
5182 a9e0c397 Iustin Pop
      size = dev.size
5183 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
5184 54155f52 Iustin Pop
      lv_names = [".disk%d_%s" % (idx, suf)
5185 54155f52 Iustin Pop
                  for suf in ["data", "meta"]]
5186 b9bddb6b Iustin Pop
      names = _GenerateUniqueNames(self, lv_names)
5187 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5188 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
5189 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5190 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
5191 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
5192 a9e0c397 Iustin Pop
      old_lvs = dev.children
5193 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
5194 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
5195 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
5196 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
5197 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
5198 428958aa Iustin Pop
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
5199 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5200 a9e0c397 Iustin Pop
5201 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
5202 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
5203 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
5204 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
5205 781de953 Iustin Pop
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
5206 781de953 Iustin Pop
      result.Raise()
5207 781de953 Iustin Pop
      if not result.data:
5208 a9e0c397 Iustin Pop
        raise errors.OpExecError("Can't detach drbd from local storage on node"
5209 a9e0c397 Iustin Pop
                                 " %s for device %s" % (tgt_node, dev.iv_name))
5210 cff90b79 Iustin Pop
      #dev.children = []
5211 cff90b79 Iustin Pop
      #cfg.Update(instance)
5212 a9e0c397 Iustin Pop
5213 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
5214 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
5215 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
5216 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
5217 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
5218 cff90b79 Iustin Pop
5219 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
5220 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
5221 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
5222 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
5223 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
5224 cff90b79 Iustin Pop
      rlist = []
5225 cff90b79 Iustin Pop
      for to_ren in old_lvs:
5226 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
5227 23829f6f Iustin Pop
        if not result.RemoteFailMsg() and result.payload:
5228 23829f6f Iustin Pop
          # device exists
5229 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
5230 cff90b79 Iustin Pop
5231 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
5232 781de953 Iustin Pop
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5233 781de953 Iustin Pop
      result.Raise()
5234 781de953 Iustin Pop
      if not result.data:
5235 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
5236 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
5237 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
5238 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
5239 781de953 Iustin Pop
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5240 781de953 Iustin Pop
      result.Raise()
5241 781de953 Iustin Pop
      if not result.data:
5242 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)
5243 cff90b79 Iustin Pop
5244 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
5245 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
5246 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
5247 a9e0c397 Iustin Pop
5248 cff90b79 Iustin Pop
      for disk in old_lvs:
5249 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
5250 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
5251 a9e0c397 Iustin Pop
5252 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
5253 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
5254 4504c3d6 Iustin Pop
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
5255 781de953 Iustin Pop
      if result.failed or not result.data:
5256 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
5257 e1bc0878 Iustin Pop
          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
5258 e1bc0878 Iustin Pop
          if msg:
5259 e1bc0878 Iustin Pop
            warning("Can't rollback device %s: %s", dev, msg,
5260 e1bc0878 Iustin Pop
                    hint="cleanup manually the unused logical volumes")
5261 cff90b79 Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd")
5262 a9e0c397 Iustin Pop
5263 a9e0c397 Iustin Pop
      dev.children = new_lvs
5264 a9e0c397 Iustin Pop
      cfg.Update(instance)
5265 a9e0c397 Iustin Pop
5266 cff90b79 Iustin Pop
    # Step: wait for sync
5267 a9e0c397 Iustin Pop
5268 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5269 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5270 a9e0c397 Iustin Pop
    # return value
5271 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5272 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5273 a9e0c397 Iustin Pop
5274 a9e0c397 Iustin Pop
    # so check manually all the devices
5275 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5276 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
5277 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
5278 23829f6f Iustin Pop
      msg = result.RemoteFailMsg()
5279 23829f6f Iustin Pop
      if not msg and not result.payload:
5280 23829f6f Iustin Pop
        msg = "disk not found"
5281 23829f6f Iustin Pop
      if msg:
5282 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
5283 23829f6f Iustin Pop
                                 (name, msg))
5284 23829f6f Iustin Pop
      if result.payload[5]:
5285 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
5286 a9e0c397 Iustin Pop
5287 cff90b79 Iustin Pop
    # Step: remove old storage
5288 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5289 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5290 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
5291 a9e0c397 Iustin Pop
      for lv in old_lvs:
5292 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
5293 e1bc0878 Iustin Pop
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
5294 e1bc0878 Iustin Pop
        if msg:
5295 e1bc0878 Iustin Pop
          warning("Can't remove old LV: %s" % msg,
5296 e1bc0878 Iustin Pop
                  hint="manually remove unused LVs")
5297 a9e0c397 Iustin Pop
          continue
5298 a9e0c397 Iustin Pop
5299 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    The feedback_fn argument is accepted for interface compatibility
    with Exec() but is not used directly; progress is reported through
    self.proc instead.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # iv_names maps disk index -> (disk, old children, new network-enabled
    # logical_id); filled in during step 4 and consumed by steps 5 and 6
    iv_names = {}
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node
    # secondary IPs are what DRBD uses for replication traffic
    nodes_ip = {
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
      }

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([pri_node, new_node])
    for node in pri_node, new_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d on %s" % (idx, pri_node))
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      # an empty payload with no error also means the device is missing
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                 (idx, pri_node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, pri_node))
      # ldisk=True: the primary's *local* disk must be consistent, since
      # the old secondary is about to be dropped
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      info("adding new local storage on %s for disk/%d" %
           (new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDev(self, new_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step 4: drbd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
                                   instance.name)
    logging.debug("Allocated minors %s" % (minors,))
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
      # NOTE(review): 'size' is assigned but unused below (new_drbd uses
      # dev.size directly)
      size = dev.size
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the latter activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if pri_node == o_node1:
        p_minor = o_minor1
      else:
        p_minor = o_minor2

      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size)
      try:
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
                              _GetInstanceInfoText(instance), False)
      except errors.GenericError:
        # undo the minor reservation on failure, then re-raise
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

    for idx, dev in enumerate(instance.disks):
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for disk/%d on old node" % idx)
      cfg.SetDiskID(dev, old_node)
      # shutdown failures are non-fatal: the old node may be unreachable
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
      if msg:
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
                (idx, msg),
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
                                               instance.disks)[pri_node]

    msg = result.RemoteFailMsg()
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
                                           instance.disks, instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.RemoteFailMsg()
      # attach failures are only warned about; the sync check below will
      # surface a device that never connected
      if msg:
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
                hint="please do a gnt-instance info to see the"
                " status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      result = self.rpc.call_blockdev_find(pri_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
                                 (idx, msg))
      # payload[5] is the degraded flag of the blockdev_find status tuple
      if result.payload[5]:
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
      info("remove logical volumes for disk/%d" % idx)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        # best-effort removal: leftover LVs are only a cleanup problem
        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
        if msg:
          warning("Can't remove LV on old secondary: %s", msg,
                  hint="Cleanup stale volumes by hand")
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    Selects the replacement routine matching the requested mode and
    runs it, temporarily activating the disks of a stopped instance
    for the duration of the operation.

    """
    inst = self.instance
    was_down = not inst.admin_up

    # A stopped instance has no active disks; bring them up so the
    # replacement routines can operate on them.
    if was_down:
      _StartInstanceDisks(self, inst, True)

    if self.op.mode == constants.REPLACE_DISK_CHG:
      result = self._ExecD8Secondary(feedback_fn)
    else:
      result = self._ExecD8DiskOnly(feedback_fn)

    # Restore the original (stopped) state by shutting the disks down.
    if was_down:
      _SafeShutdownInstanceDisks(self, inst)

    return result
5508 a9e0c397 Iustin Pop
5509 a8083063 Iustin Pop
5510 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  Grows one disk of an instance by self.op.amount mebibytes on all the
  nodes holding it, then records the new size in the configuration and
  optionally waits for the device to resync.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    # lock the instance now; the node locks are computed later, once the
    # instance's nodes are known
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      ]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that the grow
    amount is strictly positive, that the disk template supports
    growing, and that every involved node is online and has enough
    free space in the volume group.

    """
    # reject non-positive amounts early: a zero or negative value would
    # otherwise pass the vg_free check below and be sent to the nodes
    # and recorded in the configuration
    if self.op.amount <= 0:
      raise errors.OpPrereqError("Invalid grow amount %s (must be a"
                                 " positive number of MiB)" %
                                 self.op.amount)

    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      info = nodeinfo[node]
      if info.failed or not info.data:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      vg_free = info.data.get('vg_free', None)
      # vg_free can be missing or a non-integer if the node could not
      # report its volume group status
      if not isinstance(vg_free, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > vg_free:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, vg_free, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    Grows the device on every node holding it; any node failure aborts
    the operation before the new size is recorded.

    """
    instance = self.instance
    disk = self.disk
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Grow request failed to node %s: %s" %
                                 (node, msg))
    # all nodes succeeded: persist the new size in the configuration
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, instance)
      if disk_abort:
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                             " status.\nPlease check the instance.")
5603 8729e0d7 Iustin Pop
5604 8729e0d7 Iustin Pop
5605 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  Returns, for each requested instance, a dictionary with its
  configured and (unless self.op.static) live state, parameters and a
  recursive description of its disks.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # read-only operation: take all locks in shared mode
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # empty list means "all instances"; resolved in CheckPrereq from
      # the acquired locks
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" case: the instance lock level now holds every name
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Builds a dict describing the device 'dev' of 'instance', including
    the find-status on the primary (pstatus) and secondary (sstatus)
    nodes when not running statically, and recursing into child
    devices.  'snode' is the secondary node to query; for DRBD devices
    it is re-derived from the device's logical_id.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
      # an offline node cannot be queried: report no status
      if dev_pstatus.offline:
        dev_pstatus = None
      else:
        msg = dev_pstatus.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                   (instance.name, msg))
        dev_pstatus = dev_pstatus.payload
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
      if dev_sstatus.offline:
        dev_sstatus = None
      else:
        msg = dev_sstatus.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Can't compute disk status for %s: %s" %
                                   (instance.name, msg))
        dev_sstatus = dev_sstatus.payload
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        # query the hypervisor for the live run state
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise()
        remote_info = remote_info.data
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        # hv/be "instance" values are the explicitly set parameters,
        # "actual" the cluster defaults filled in
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
5758 a8083063 Iustin Pop
5759 a8083063 Iustin Pop
5760 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
5761 a8083063 Iustin Pop
  """Modifies an instances's parameters.
5762 a8083063 Iustin Pop

5763 a8083063 Iustin Pop
  """
5764 a8083063 Iustin Pop
  HPATH = "instance-modify"
5765 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5766 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
5767 1a5c7281 Guido Trotter
  REQ_BGL = False
5768 1a5c7281 Guido Trotter
5769 24991749 Iustin Pop
  def CheckArguments(self):
5770 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
5771 24991749 Iustin Pop
      self.op.nics = []
5772 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
5773 24991749 Iustin Pop
      self.op.disks = []
5774 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
5775 24991749 Iustin Pop
      self.op.beparams = {}
5776 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
5777 24991749 Iustin Pop
      self.op.hvparams = {}
5778 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
5779 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
5780 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
5781 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
5782 24991749 Iustin Pop
5783 24991749 Iustin Pop
    # Disk validation
5784 24991749 Iustin Pop
    disk_addremove = 0
5785 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5786 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5787 24991749 Iustin Pop
        disk_addremove += 1
5788 24991749 Iustin Pop
        continue
5789 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
5790 24991749 Iustin Pop
        disk_addremove += 1
5791 24991749 Iustin Pop
      else:
5792 24991749 Iustin Pop
        if not isinstance(disk_op, int):
5793 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
5794 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
5795 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5796 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
5797 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5798 24991749 Iustin Pop
        size = disk_dict.get('size', None)
5799 24991749 Iustin Pop
        if size is None:
5800 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
5801 24991749 Iustin Pop
        try:
5802 24991749 Iustin Pop
          size = int(size)
5803 24991749 Iustin Pop
        except ValueError, err:
5804 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5805 24991749 Iustin Pop
                                     str(err))
5806 24991749 Iustin Pop
        disk_dict['size'] = size
5807 24991749 Iustin Pop
      else:
5808 24991749 Iustin Pop
        # modification of disk
5809 24991749 Iustin Pop
        if 'size' in disk_dict:
5810 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
5811 24991749 Iustin Pop
                                     " grow-disk")
5812 24991749 Iustin Pop
5813 24991749 Iustin Pop
    if disk_addremove > 1:
5814 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
5815 24991749 Iustin Pop
                                 " supported at a time")
5816 24991749 Iustin Pop
5817 24991749 Iustin Pop
    # NIC validation
5818 24991749 Iustin Pop
    nic_addremove = 0
5819 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5820 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5821 24991749 Iustin Pop
        nic_addremove += 1
5822 24991749 Iustin Pop
        continue
5823 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
5824 24991749 Iustin Pop
        nic_addremove += 1
5825 24991749 Iustin Pop
      else:
5826 24991749 Iustin Pop
        if not isinstance(nic_op, int):
5827 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
5828 24991749 Iustin Pop
5829 24991749 Iustin Pop
      # nic_dict should be a dict
5830 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
5831 24991749 Iustin Pop
      if nic_ip is not None:
5832 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
5833 24991749 Iustin Pop
          nic_dict['ip'] = None
5834 24991749 Iustin Pop
        else:
5835 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
5836 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
5837 5c44da6a Guido Trotter
5838 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
5839 5c44da6a Guido Trotter
        nic_bridge = nic_dict.get('bridge', None)
5840 5c44da6a Guido Trotter
        if nic_bridge is None:
5841 5c44da6a Guido Trotter
          nic_dict['bridge'] = self.cfg.GetDefBridge()
5842 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
5843 5c44da6a Guido Trotter
        if nic_mac is None:
5844 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
5845 5c44da6a Guido Trotter
5846 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
5847 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
5848 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5849 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
5850 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
5851 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
5852 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
5853 5c44da6a Guido Trotter
                                     " modifying an existing nic")
5854 5c44da6a Guido Trotter
5855 24991749 Iustin Pop
    if nic_addremove > 1:
5856 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
5857 24991749 Iustin Pop
                                 " supported at a time")
5858 24991749 Iustin Pop
5859 1a5c7281 Guido Trotter
  def ExpandNames(self):
5860 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
5861 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
5862 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5863 74409b12 Iustin Pop
5864 74409b12 Iustin Pop
  def DeclareLocks(self, level):
5865 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
5866 74409b12 Iustin Pop
      self._LockInstancesNodes()
5867 a8083063 Iustin Pop
5868 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5869 a8083063 Iustin Pop
    """Build hooks env.
5870 a8083063 Iustin Pop

5871 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
5872 a8083063 Iustin Pop

5873 a8083063 Iustin Pop
    """
5874 396e1b78 Michael Hanselmann
    args = dict()
5875 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
5876 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
5877 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
5878 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
5879 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
5880 d8dcf3c9 Guido Trotter
    # information at all.
5881 d8dcf3c9 Guido Trotter
    if self.op.nics:
5882 d8dcf3c9 Guido Trotter
      args['nics'] = []
5883 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
5884 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
5885 d8dcf3c9 Guido Trotter
        if idx in nic_override:
5886 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
5887 d8dcf3c9 Guido Trotter
        else:
5888 d8dcf3c9 Guido Trotter
          this_nic_override = {}
5889 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
5890 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
5891 d8dcf3c9 Guido Trotter
        else:
5892 d8dcf3c9 Guido Trotter
          ip = nic.ip
5893 d8dcf3c9 Guido Trotter
        if 'bridge' in this_nic_override:
5894 d8dcf3c9 Guido Trotter
          bridge = this_nic_override['bridge']
5895 d8dcf3c9 Guido Trotter
        else:
5896 d8dcf3c9 Guido Trotter
          bridge = nic.bridge
5897 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
5898 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
5899 d8dcf3c9 Guido Trotter
        else:
5900 d8dcf3c9 Guido Trotter
          mac = nic.mac
5901 d8dcf3c9 Guido Trotter
        args['nics'].append((ip, bridge, mac))
5902 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
5903 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
5904 d8dcf3c9 Guido Trotter
        bridge = nic_override[constants.DDM_ADD]['bridge']
5905 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
5906 d8dcf3c9 Guido Trotter
        args['nics'].append((ip, bridge, mac))
5907 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
5908 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
5909 d8dcf3c9 Guido Trotter
5910 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
5911 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5912 a8083063 Iustin Pop
    return env, nl, nl
5913 a8083063 Iustin Pop
5914 a8083063 Iustin Pop
  def CheckPrereq(self):
5915 a8083063 Iustin Pop
    """Check prerequisites.
5916 a8083063 Iustin Pop

5917 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
5918 a8083063 Iustin Pop

5919 a8083063 Iustin Pop
    """
5920 24991749 Iustin Pop
    force = self.force = self.op.force
5921 a8083063 Iustin Pop
5922 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
5923 31a853d2 Iustin Pop
5924 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5925 1a5c7281 Guido Trotter
    assert self.instance is not None, \
5926 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5927 6b12959c Iustin Pop
    pnode = instance.primary_node
5928 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
5929 74409b12 Iustin Pop
5930 338e51e8 Iustin Pop
    # hvparams processing
5931 74409b12 Iustin Pop
    if self.op.hvparams:
5932 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
5933 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
5934 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
5935 74409b12 Iustin Pop
          try:
5936 74409b12 Iustin Pop
            del i_hvdict[key]
5937 74409b12 Iustin Pop
          except KeyError:
5938 74409b12 Iustin Pop
            pass
5939 74409b12 Iustin Pop
        else:
5940 74409b12 Iustin Pop
          i_hvdict[key] = val
5941 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
5942 a5728081 Guido Trotter
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
5943 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
5944 74409b12 Iustin Pop
                                i_hvdict)
5945 74409b12 Iustin Pop
      # local check
5946 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
5947 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
5948 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
5949 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
5950 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
5951 338e51e8 Iustin Pop
    else:
5952 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
5953 338e51e8 Iustin Pop
5954 338e51e8 Iustin Pop
    # beparams processing
5955 338e51e8 Iustin Pop
    if self.op.beparams:
5956 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
5957 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
5958 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
5959 338e51e8 Iustin Pop
          try:
5960 338e51e8 Iustin Pop
            del i_bedict[key]
5961 338e51e8 Iustin Pop
          except KeyError:
5962 338e51e8 Iustin Pop
            pass
5963 338e51e8 Iustin Pop
        else:
5964 338e51e8 Iustin Pop
          i_bedict[key] = val
5965 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
5966 a5728081 Guido Trotter
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
5967 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
5968 338e51e8 Iustin Pop
                                i_bedict)
5969 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
5970 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
5971 338e51e8 Iustin Pop
    else:
5972 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
5973 74409b12 Iustin Pop
5974 cfefe007 Guido Trotter
    self.warn = []
5975 647a5d80 Iustin Pop
5976 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
5977 647a5d80 Iustin Pop
      mem_check_list = [pnode]
5978 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
5979 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
5980 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
5981 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
5982 72737a7f Iustin Pop
                                                  instance.hypervisor)
5983 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
5984 72737a7f Iustin Pop
                                         instance.hypervisor)
5985 781de953 Iustin Pop
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
5986 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
5987 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
5988 cfefe007 Guido Trotter
      else:
5989 781de953 Iustin Pop
        if not instance_info.failed and instance_info.data:
5990 ade0e8cd Guido Trotter
          current_mem = int(instance_info.data['memory'])
5991 cfefe007 Guido Trotter
        else:
5992 cfefe007 Guido Trotter
          # Assume instance not running
5993 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
5994 cfefe007 Guido Trotter
          # and we have no other way to check)
5995 cfefe007 Guido Trotter
          current_mem = 0
5996 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
5997 781de953 Iustin Pop
                    nodeinfo[pnode].data['memory_free'])
5998 cfefe007 Guido Trotter
        if miss_mem > 0:
5999 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
6000 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
6001 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
6002 cfefe007 Guido Trotter
6003 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6004 ea33068f Iustin Pop
        for node, nres in nodeinfo.iteritems():
6005 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
6006 ea33068f Iustin Pop
            continue
6007 781de953 Iustin Pop
          if nres.failed or not isinstance(nres.data, dict):
6008 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
6009 781de953 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
6010 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
6011 647a5d80 Iustin Pop
                             " secondary node %s" % node)
6012 5bc84f33 Alexander Schreiber
6013 24991749 Iustin Pop
    # NIC processing
6014 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6015 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6016 24991749 Iustin Pop
        if not instance.nics:
6017 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
6018 24991749 Iustin Pop
        continue
6019 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
6020 24991749 Iustin Pop
        # an existing nic
6021 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
6022 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
6023 24991749 Iustin Pop
                                     " are 0 to %d" %
6024 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
6025 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
6026 5c44da6a Guido Trotter
        nic_bridge = nic_dict['bridge']
6027 5c44da6a Guido Trotter
        if nic_bridge is None:
6028 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic bridge to None')
6029 24991749 Iustin Pop
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
6030 24991749 Iustin Pop
          msg = ("Bridge '%s' doesn't exist on one of"
6031 24991749 Iustin Pop
                 " the instance nodes" % nic_bridge)
6032 24991749 Iustin Pop
          if self.force:
6033 24991749 Iustin Pop
            self.warn.append(msg)
6034 24991749 Iustin Pop
          else:
6035 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
6036 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6037 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6038 5c44da6a Guido Trotter
        if nic_mac is None:
6039 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic mac to None')
6040 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6041 5c44da6a Guido Trotter
          # otherwise generate the mac
6042 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
6043 5c44da6a Guido Trotter
        else:
6044 5c44da6a Guido Trotter
          # or validate/reserve the current one
6045 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
6046 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
6047 5c44da6a Guido Trotter
                                       " in cluster" % nic_mac)
6048 24991749 Iustin Pop
6049 24991749 Iustin Pop
    # DISK processing
6050 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
6051 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
6052 24991749 Iustin Pop
                                 " diskless instances")
6053 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6054 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6055 24991749 Iustin Pop
        if len(instance.disks) == 1:
6056 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
6057 24991749 Iustin Pop
                                     " an instance")
6058 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
6059 24991749 Iustin Pop
        ins_l = ins_l[pnode]
6060 4cfb9426 Iustin Pop
        if ins_l.failed or not isinstance(ins_l.data, list):
6061 24991749 Iustin Pop
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
6062 4cfb9426 Iustin Pop
        if instance.name in ins_l.data:
6063 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
6064 24991749 Iustin Pop
                                     " disks.")
6065 24991749 Iustin Pop
6066 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
6067 24991749 Iustin Pop
          len(instance.nics) >= constants.MAX_DISKS):
6068 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
6069 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
6070 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
6071 24991749 Iustin Pop
        # an existing disk
6072 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
6073 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
6074 24991749 Iustin Pop
                                     " are 0 to %d" %
6075 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
6076 24991749 Iustin Pop
6077 a8083063 Iustin Pop
    return
6078 a8083063 Iustin Pop
6079 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6080 a8083063 Iustin Pop
    """Modifies an instance.
6081 a8083063 Iustin Pop

6082 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
6083 24991749 Iustin Pop

6084 a8083063 Iustin Pop
    """
6085 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
6086 cfefe007 Guido Trotter
    # feedback_fn there.
6087 cfefe007 Guido Trotter
    for warn in self.warn:
6088 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
6089 cfefe007 Guido Trotter
6090 a8083063 Iustin Pop
    result = []
6091 a8083063 Iustin Pop
    instance = self.instance
6092 24991749 Iustin Pop
    # disk changes
6093 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6094 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6095 24991749 Iustin Pop
        # remove the last disk
6096 24991749 Iustin Pop
        device = instance.disks.pop()
6097 24991749 Iustin Pop
        device_idx = len(instance.disks)
6098 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
6099 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
6100 e1bc0878 Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
6101 e1bc0878 Iustin Pop
          if msg:
6102 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
6103 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
6104 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
6105 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
6106 24991749 Iustin Pop
        # add a new disk
6107 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
6108 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
6109 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
6110 24991749 Iustin Pop
        else:
6111 24991749 Iustin Pop
          file_driver = file_path = None
6112 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
6113 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
6114 24991749 Iustin Pop
                                         instance.disk_template,
6115 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
6116 24991749 Iustin Pop
                                         instance.secondary_nodes,
6117 24991749 Iustin Pop
                                         [disk_dict],
6118 24991749 Iustin Pop
                                         file_path,
6119 24991749 Iustin Pop
                                         file_driver,
6120 24991749 Iustin Pop
                                         disk_idx_base)[0]
6121 24991749 Iustin Pop
        instance.disks.append(new_disk)
6122 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
6123 24991749 Iustin Pop
6124 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
6125 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
6126 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
6127 24991749 Iustin Pop
        #HARDCODE
6128 428958aa Iustin Pop
        for node in instance.all_nodes:
6129 428958aa Iustin Pop
          f_create = node == instance.primary_node
6130 796cab27 Iustin Pop
          try:
6131 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
6132 428958aa Iustin Pop
                            f_create, info, f_create)
6133 1492cca7 Iustin Pop
          except errors.OpExecError, err:
6134 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
6135 428958aa Iustin Pop
                            " node %s: %s",
6136 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
6137 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
6138 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
6139 24991749 Iustin Pop
      else:
6140 24991749 Iustin Pop
        # change a given disk
6141 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
6142 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
6143 24991749 Iustin Pop
    # NIC changes
6144 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6145 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6146 24991749 Iustin Pop
        # remove the last nic
6147 24991749 Iustin Pop
        del instance.nics[-1]
6148 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
6149 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6150 5c44da6a Guido Trotter
        # mac and bridge should be set, by now
6151 5c44da6a Guido Trotter
        mac = nic_dict['mac']
6152 5c44da6a Guido Trotter
        bridge = nic_dict['bridge']
6153 24991749 Iustin Pop
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
6154 5c44da6a Guido Trotter
                              bridge=bridge)
6155 24991749 Iustin Pop
        instance.nics.append(new_nic)
6156 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
6157 24991749 Iustin Pop
                       "add:mac=%s,ip=%s,bridge=%s" %
6158 24991749 Iustin Pop
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
6159 24991749 Iustin Pop
      else:
6160 24991749 Iustin Pop
        # change a given nic
6161 24991749 Iustin Pop
        for key in 'mac', 'ip', 'bridge':
6162 24991749 Iustin Pop
          if key in nic_dict:
6163 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
6164 24991749 Iustin Pop
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
6165 24991749 Iustin Pop
6166 24991749 Iustin Pop
    # hvparams changes
6167 74409b12 Iustin Pop
    if self.op.hvparams:
6168 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
6169 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
6170 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
6171 24991749 Iustin Pop
6172 24991749 Iustin Pop
    # beparams changes
6173 338e51e8 Iustin Pop
    if self.op.beparams:
6174 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
6175 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
6176 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
6177 a8083063 Iustin Pop
6178 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
6179 a8083063 Iustin Pop
6180 a8083063 Iustin Pop
    return result
6181 a8083063 Iustin Pop
6182 a8083063 Iustin Pop
6183 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    """Share-lock the requested nodes (all nodes when none are given)."""
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the node list is simply whatever we managed to lock
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node.

    """
    exports = {}
    rpcresult = self.rpc.call_export_list(self.nodes)
    for node, noderes in rpcresult.items():
      # a failed RPC is signalled by a False entry for that node
      if noderes.failed:
        exports[node] = False
      else:
        exports[node] = noderes.data

    return exports
6223 a8083063 Iustin Pop
6224 a8083063 Iustin Pop
6225 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have to lock all nodes, as we don't know where
    # the previous export might be, and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    The instance is optionally shut down, each of its disks is
    snapshotted, the snapshots are exported to the target node, and
    any older export of the same instance on other nodes is removed.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, src_node, msg))

    vgname = self.cfg.GetVGName()

    # per-disk snapshot list; entries are False for disks whose
    # snapshot failed (kept positional so indexes still match)
    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    try:
      for idx, disk in enumerate(instance.disks):
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
        if new_dev_name.failed or not new_dev_name.data:
          self.LogWarning("Could not snapshot disk/%d on node %s",
                          idx, src_node)
          snap_disks.append(False)
        else:
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=(vgname, new_dev_name.data),
                                 physical_id=(vgname, new_dev_name.data),
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if
      # it was running (admin_up) before we shut it down
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance, None, None)
        msg = result.RemoteFailMsg()
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        if result.failed or not result.data:
          self.LogWarning("Could not export disk/%d from node %s to"
                          " node %s", idx, src_node, dst_node.name)
        # the snapshot is removed whether the export succeeded or not
        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
        if msg:
          self.LogWarning("Could not remove snapshot for disk/%d from node"
                          " %s: %s", idx, src_node, msg)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    if result.failed or not result.data:
      self.LogWarning("Could not finalize export for instance %s on node %s",
                      instance.name, dst_node.name)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].failed:
          continue
        if instance.name in exportlist[node].data:
          # check the RPC result object's failed/data fields (like the
          # other call sites do); the result object itself is always
          # truthy, so testing it directly would never detect a failure
          remove_result = self.rpc.call_export_remove(node, instance.name)
          if remove_result.failed or not remove_result.data:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s", instance.name, node)
6375 5c947f38 Iustin Pop
6376 5c947f38 Iustin Pop
6377 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # All nodes must be locked so RemoveExport can run everywhere, but the
    # instance itself needs no lock: nothing happens to it, and exports of
    # already-removed instances must still be removable.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      }

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was
    # passed in; that only works if it was an FQDN, though.
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      node_result = exportlist[node]
      if node_result.failed:
        self.LogWarning("Failed to query node %s, continuing" % node)
        continue
      if instance_name in node_result.data:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        if result.failed or not result.data:
          logging.error("Could not remove export for instance %s"
                        " on node %s", instance_name, node)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
6426 9ac99fda Guido Trotter
6427 9ac99fda Guido Trotter
6428 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    """Expand the target name and lock it (nodes/instances only)."""
    self.needed_locks = {}
    kind = self.op.kind
    if kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # resolve self.target from the (kind, name) pair
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
6465 5c947f38 Iustin Pop
6466 5c947f38 Iustin Pop
6467 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq; hand back a copy
    # of its tag set as a list.
    tags = self.target.GetTags()
    return list(tags)
6479 5c947f38 Iustin Pop
6480 5c947f38 Iustin Pop
6481 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6482 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6483 73415719 Iustin Pop

6484 73415719 Iustin Pop
  """
6485 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6486 8646adce Guido Trotter
  REQ_BGL = False
6487 8646adce Guido Trotter
6488 8646adce Guido Trotter
  def ExpandNames(self):
6489 8646adce Guido Trotter
    self.needed_locks = {}
6490 73415719 Iustin Pop
6491 73415719 Iustin Pop
  def CheckPrereq(self):
6492 73415719 Iustin Pop
    """Check prerequisites.
6493 73415719 Iustin Pop

6494 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6495 73415719 Iustin Pop

6496 73415719 Iustin Pop
    """
6497 73415719 Iustin Pop
    try:
6498 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6499 73415719 Iustin Pop
    except re.error, err:
6500 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6501 73415719 Iustin Pop
                                 (self.op.pattern, err))
6502 73415719 Iustin Pop
6503 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6504 73415719 Iustin Pop
    """Returns the tag list.
6505 73415719 Iustin Pop

6506 73415719 Iustin Pop
    """
6507 73415719 Iustin Pop
    cfg = self.cfg
6508 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6509 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6510 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6511 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6512 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6513 73415719 Iustin Pop
    results = []
6514 73415719 Iustin Pop
    for path, target in tgts:
6515 73415719 Iustin Pop
      for tag in target.GetTags():
6516 73415719 Iustin Pop
        if self.re.search(tag):
6517 73415719 Iustin Pop
          results.append((path, tag))
6518 73415719 Iustin Pop
    return results
6519 73415719 Iustin Pop
6520 73415719 Iustin Pop
6521 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6522 5c947f38 Iustin Pop
  """Sets a tag on a given object.
6523 5c947f38 Iustin Pop

6524 5c947f38 Iustin Pop
  """
6525 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6526 8646adce Guido Trotter
  REQ_BGL = False
6527 5c947f38 Iustin Pop
6528 5c947f38 Iustin Pop
  def CheckPrereq(self):
6529 5c947f38 Iustin Pop
    """Check prerequisites.
6530 5c947f38 Iustin Pop

6531 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
6532 5c947f38 Iustin Pop

6533 5c947f38 Iustin Pop
    """
6534 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6535 f27302fa Iustin Pop
    for tag in self.op.tags:
6536 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6537 5c947f38 Iustin Pop
6538 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6539 5c947f38 Iustin Pop
    """Sets the tag.
6540 5c947f38 Iustin Pop

6541 5c947f38 Iustin Pop
    """
6542 5c947f38 Iustin Pop
    try:
6543 f27302fa Iustin Pop
      for tag in self.op.tags:
6544 f27302fa Iustin Pop
        self.target.AddTag(tag)
6545 5c947f38 Iustin Pop
    except errors.TagError, err:
6546 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6547 5c947f38 Iustin Pop
    try:
6548 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6549 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6550 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6551 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6552 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6553 5c947f38 Iustin Pop
6554 5c947f38 Iustin Pop
6555 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    # every requested tag must currently be present on the target
    missing = frozenset(self.op.tags) - self.target.GetTags()
    if missing:
      diff_names = ["'%s'" % tag for tag in missing]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for del_tag in self.op.tags:
      self.target.RemoveTag(del_tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
6592 06009e27 Iustin Pop
6593 0eed6e61 Guido Trotter
6594 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # NB: _GetWantedNodes can be used here, but is not always appropriate
    # to use this way in ExpandNames; check the LogicalUnit.ExpandNames
    # docstring for more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    duration = self.op.duration
    if self.op.on_master and not utils.TestDelay(duration):
      raise errors.OpExecError("Error during master delay test")
    if not self.op.on_nodes:
      return
    rpc_result = self.rpc.call_test_delay(self.op.on_nodes, duration)
    if not rpc_result:
      raise errors.OpExecError("Complete failure from rpc call")
    for node, nres in rpc_result.items():
      nres.Raise()
      if not nres.data:
        raise errors.OpExecError("Failure during rpc call to node %s,"
                                 " result: %s" % (node, nres.data))
6639 d61df03e Iustin Pop
6640 d61df03e Iustin Pop
6641 d1c2dd75 Iustin Pop
class IAllocator(object):
6642 d1c2dd75 Iustin Pop
  """IAllocator framework.
6643 d61df03e Iustin Pop

6644 d1c2dd75 Iustin Pop
  An IAllocator instance has three sets of attributes:
6645 d6a02168 Michael Hanselmann
    - cfg that is needed to query the cluster
6646 d1c2dd75 Iustin Pop
    - input data (all members of the _KEYS class attribute are required)
6647 d1c2dd75 Iustin Pop
    - four buffer attributes (in|out_data|text), that represent the
6648 d1c2dd75 Iustin Pop
      input (to the external script) in text and data structure format,
6649 d1c2dd75 Iustin Pop
      and the output from it, again in two formats
6650 d1c2dd75 Iustin Pop
    - the result variables from the script (success, info, nodes) for
6651 d1c2dd75 Iustin Pop
      easy usage
6652 d61df03e Iustin Pop

6653 d61df03e Iustin Pop
  """
6654 29859cb7 Iustin Pop
  # Input keys required when allocating a new instance
  # (IALLOCATOR_MODE_ALLOC); __init__ enforces that exactly these
  # keyword arguments are supplied in that mode.
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  # Input keys required when relocating an existing instance
  # (IALLOCATOR_MODE_RELOC).
  _RELO_KEYS = [
    "relocate_from",
    ]
6661 d1c2dd75 Iustin Pop
6662 72737a7f Iustin Pop
  def __init__(self, lu, mode, name, **kwargs):
    """Initialize the allocator request.

    @param lu: the LogicalUnit on whose behalf we run (used for cfg/rpc)
    @param mode: one of constants.IALLOCATOR_MODE_ALLOC/_RELOC; selects
        which set of keyword arguments is mandatory
    @param name: the instance name this request is about
    @param kwargs: exactly the keys listed in _ALLO_KEYS or _RELO_KEYS,
        depending on mode
    @raise errors.ProgrammerError: on unknown mode, or on an extra or
        missing keyword argument

    """
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # reject any keyword argument outside the mode's keyset, and copy
    # the accepted ones onto the matching instance attributes
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    # every key of the mode's keyset must have been supplied
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()
6694 d1c2dd75 Iustin Pop
6695 d1c2dd75 Iustin Pop
  def _ComputeClusterData(self):
6696 d1c2dd75 Iustin Pop
    """Compute the generic allocator input data.
6697 d1c2dd75 Iustin Pop

6698 d1c2dd75 Iustin Pop
    This is the data that is independent of the actual operation.
6699 d1c2dd75 Iustin Pop

6700 d1c2dd75 Iustin Pop
    """
6701 72737a7f Iustin Pop
    cfg = self.lu.cfg
6702 e69d05fd Iustin Pop
    cluster_info = cfg.GetClusterInfo()
6703 d1c2dd75 Iustin Pop
    # cluster data
6704 d1c2dd75 Iustin Pop
    data = {
6705 77031881 Iustin Pop
      "version": constants.IALLOCATOR_VERSION,
6706 72737a7f Iustin Pop
      "cluster_name": cfg.GetClusterName(),
6707 e69d05fd Iustin Pop
      "cluster_tags": list(cluster_info.GetTags()),
6708 1325da74 Iustin Pop
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
6709 d1c2dd75 Iustin Pop
      # we don't have job IDs
6710 d61df03e Iustin Pop
      }
6711 b57e9819 Guido Trotter
    iinfo = cfg.GetAllInstancesInfo().values()
6712 b57e9819 Guido Trotter
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
6713 6286519f Iustin Pop
6714 d1c2dd75 Iustin Pop
    # node data
6715 d1c2dd75 Iustin Pop
    node_results = {}
6716 d1c2dd75 Iustin Pop
    node_list = cfg.GetNodeList()
6717 8cc7e742 Guido Trotter
6718 8cc7e742 Guido Trotter
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
6719 a0add446 Iustin Pop
      hypervisor_name = self.hypervisor
6720 8cc7e742 Guido Trotter
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
6721 a0add446 Iustin Pop
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor
6722 8cc7e742 Guido Trotter
6723 72737a7f Iustin Pop
    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
6724 a0add446 Iustin Pop
                                           hypervisor_name)
6725 18640d69 Guido Trotter
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
6726 18640d69 Guido Trotter
                       cluster_info.enabled_hypervisors)
6727 1325da74 Iustin Pop
    for nname, nresult in node_data.items():
6728 1325da74 Iustin Pop
      # first fill in static (config-based) values
6729 d1c2dd75 Iustin Pop
      ninfo = cfg.GetNodeInfo(nname)
6730 d1c2dd75 Iustin Pop
      pnr = {
6731 d1c2dd75 Iustin Pop
        "tags": list(ninfo.GetTags()),
6732 d1c2dd75 Iustin Pop
        "primary_ip": ninfo.primary_ip,
6733 d1c2dd75 Iustin Pop
        "secondary_ip": ninfo.secondary_ip,
6734 fc0fe88c Iustin Pop
        "offline": ninfo.offline,
6735 0b2454b9 Iustin Pop
        "drained": ninfo.drained,
6736 1325da74 Iustin Pop
        "master_candidate": ninfo.master_candidate,
6737 d1c2dd75 Iustin Pop
        }
6738 1325da74 Iustin Pop
6739 1325da74 Iustin Pop
      if not ninfo.offline:
6740 1325da74 Iustin Pop
        nresult.Raise()
6741 1325da74 Iustin Pop
        if not isinstance(nresult.data, dict):
6742 1325da74 Iustin Pop
          raise errors.OpExecError("Can't get data for node %s" % nname)
6743 1325da74 Iustin Pop
        remote_info = nresult.data
6744 1325da74 Iustin Pop
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
6745 1325da74 Iustin Pop
                     'vg_size', 'vg_free', 'cpu_total']:
6746 1325da74 Iustin Pop
          if attr not in remote_info:
6747 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' didn't return attribute"
6748 1325da74 Iustin Pop
                                     " '%s'" % (nname, attr))
6749 1325da74 Iustin Pop
          try:
6750 1325da74 Iustin Pop
            remote_info[attr] = int(remote_info[attr])
6751 1325da74 Iustin Pop
          except ValueError, err:
6752 1325da74 Iustin Pop
            raise errors.OpExecError("Node '%s' returned invalid value"
6753 1325da74 Iustin Pop
                                     " for '%s': %s" % (nname, attr, err))
6754 1325da74 Iustin Pop
        # compute memory used by primary instances
6755 1325da74 Iustin Pop
        i_p_mem = i_p_up_mem = 0
6756 1325da74 Iustin Pop
        for iinfo, beinfo in i_list:
6757 1325da74 Iustin Pop
          if iinfo.primary_node == nname:
6758 1325da74 Iustin Pop
            i_p_mem += beinfo[constants.BE_MEMORY]
6759 1325da74 Iustin Pop
            if iinfo.name not in node_iinfo[nname].data:
6760 1325da74 Iustin Pop
              i_used_mem = 0
6761 1325da74 Iustin Pop
            else:
6762 1325da74 Iustin Pop
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
6763 1325da74 Iustin Pop
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
6764 1325da74 Iustin Pop
            remote_info['memory_free'] -= max(0, i_mem_diff)
6765 1325da74 Iustin Pop
6766 1325da74 Iustin Pop
            if iinfo.admin_up:
6767 1325da74 Iustin Pop
              i_p_up_mem += beinfo[constants.BE_MEMORY]
6768 1325da74 Iustin Pop
6769 1325da74 Iustin Pop
        # compute memory used by instances
6770 1325da74 Iustin Pop
        pnr_dyn = {
6771 1325da74 Iustin Pop
          "total_memory": remote_info['memory_total'],
6772 1325da74 Iustin Pop
          "reserved_memory": remote_info['memory_dom0'],
6773 1325da74 Iustin Pop
          "free_memory": remote_info['memory_free'],
6774 1325da74 Iustin Pop
          "total_disk": remote_info['vg_size'],
6775 1325da74 Iustin Pop
          "free_disk": remote_info['vg_free'],
6776 1325da74 Iustin Pop
          "total_cpus": remote_info['cpu_total'],
6777 1325da74 Iustin Pop
          "i_pri_memory": i_p_mem,
6778 1325da74 Iustin Pop
          "i_pri_up_memory": i_p_up_mem,
6779 1325da74 Iustin Pop
          }
6780 1325da74 Iustin Pop
        pnr.update(pnr_dyn)
6781 1325da74 Iustin Pop
6782 d1c2dd75 Iustin Pop
      node_results[nname] = pnr
6783 d1c2dd75 Iustin Pop
    data["nodes"] = node_results
6784 d1c2dd75 Iustin Pop
6785 d1c2dd75 Iustin Pop
    # instance data
6786 d1c2dd75 Iustin Pop
    instance_data = {}
6787 338e51e8 Iustin Pop
    for iinfo, beinfo in i_list:
6788 d1c2dd75 Iustin Pop
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
6789 d1c2dd75 Iustin Pop
                  for n in iinfo.nics]
6790 d1c2dd75 Iustin Pop
      pir = {
6791 d1c2dd75 Iustin Pop
        "tags": list(iinfo.GetTags()),
6792 1325da74 Iustin Pop
        "admin_up": iinfo.admin_up,
6793 338e51e8 Iustin Pop
        "vcpus": beinfo[constants.BE_VCPUS],
6794 338e51e8 Iustin Pop
        "memory": beinfo[constants.BE_MEMORY],
6795 d1c2dd75 Iustin Pop
        "os": iinfo.os,
6796 1325da74 Iustin Pop
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
6797 d1c2dd75 Iustin Pop
        "nics": nic_data,
6798 1325da74 Iustin Pop
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
6799 d1c2dd75 Iustin Pop
        "disk_template": iinfo.disk_template,
6800 e69d05fd Iustin Pop
        "hypervisor": iinfo.hypervisor,
6801 d1c2dd75 Iustin Pop
        }
6802 88ae4f85 Iustin Pop
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
6803 88ae4f85 Iustin Pop
                                                 pir["disks"])
6804 768f0a80 Iustin Pop
      instance_data[iinfo.name] = pir
6805 d61df03e Iustin Pop
6806 d1c2dd75 Iustin Pop
    data["instances"] = instance_data
6807 d61df03e Iustin Pop
6808 d1c2dd75 Iustin Pop
    self.in_data = data
6809 d61df03e Iustin Pop
6810 d1c2dd75 Iustin Pop
  def _AddNewInstance(self):
    """Attach an 'allocate' request to the allocator input.

    Together with _ComputeClusterData this builds the complete input
    structure needed by the allocator; the opcode completeness checks
    must have been performed beforehand.

    """
    # network-mirrored templates need a (primary, secondary) node pair,
    # everything else only a primary node
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1

    total_disk = _ComputeDiskSize(self.disk_template, self.disks)

    self.in_data["request"] = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": total_disk,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
6842 298fe380 Iustin Pop
6843 d1c2dd75 Iustin Pop
  def _AddRelocateInstance(self):
    """Attach a 'relocate' request to the allocator input.

    Together with _ComputeClusterData this builds the complete input
    structure needed by the allocator; the opcode completeness checks
    must have been performed beforehand.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # relocation only makes sense for mirrored instances with exactly
    # one secondary node
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    sizes = [{'size': d.size} for d in instance.disks]
    space_needed = _ComputeDiskSize(instance.disk_template, sizes)

    self.in_data["request"] = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": space_needed,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
6876 d61df03e Iustin Pop
6877 d1c2dd75 Iustin Pop
  def _BuildInputData(self):
    """Assemble the full allocator input document.

    Gathers the cluster-wide data, attaches the request section
    matching the current mode, and stores the serialized result in
    self.in_text.

    """
    self._ComputeClusterData()

    if self.mode != constants.IALLOCATOR_MODE_ALLOC:
      self._AddRelocateInstance()
    else:
      self._AddNewInstance()

    self.in_text = serializer.Dump(self.in_data)
6889 d61df03e Iustin Pop
6890 72737a7f Iustin Pop
  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    @type name: string
    @param name: the name of the allocator script to run on the master
    @type validate: boolean
    @param validate: whether to validate the allocator output via
        _ValidateResult
    @type call_fn: callable or None
    @param call_fn: the RPC function used to invoke the allocator;
        defaults to self.lu.rpc.call_iallocator_runner
    @raise errors.OpExecError: if the RPC result is malformed, the
        allocator cannot be found, or the allocator run fails

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner
    # fix: dropped the dead "data = self.in_text" local that was
    # assigned but never used

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    # the runner returns a (return-code, stdout, stderr, fail-reason) tuple
    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()
6914 298fe380 Iustin Pop
6915 d1c2dd75 Iustin Pop
  def _ValidateResult(self):
6916 d1c2dd75 Iustin Pop
    """Process the allocator results.
6917 538475ca Iustin Pop

6918 d1c2dd75 Iustin Pop
    This will process and if successful save the result in
6919 d1c2dd75 Iustin Pop
    self.out_data and the other parameters.
6920 538475ca Iustin Pop

6921 d1c2dd75 Iustin Pop
    """
6922 d1c2dd75 Iustin Pop
    try:
6923 d1c2dd75 Iustin Pop
      rdict = serializer.Load(self.out_text)
6924 d1c2dd75 Iustin Pop
    except Exception, err:
6925 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
6926 d1c2dd75 Iustin Pop
6927 d1c2dd75 Iustin Pop
    if not isinstance(rdict, dict):
6928 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: not a dict")
6929 538475ca Iustin Pop
6930 d1c2dd75 Iustin Pop
    for key in "success", "info", "nodes":
6931 d1c2dd75 Iustin Pop
      if key not in rdict:
6932 d1c2dd75 Iustin Pop
        raise errors.OpExecError("Can't parse iallocator results:"
6933 d1c2dd75 Iustin Pop
                                 " missing key '%s'" % key)
6934 d1c2dd75 Iustin Pop
      setattr(self, key, rdict[key])
6935 538475ca Iustin Pop
6936 d1c2dd75 Iustin Pop
    if not isinstance(rdict["nodes"], list):
6937 d1c2dd75 Iustin Pop
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
6938 d1c2dd75 Iustin Pop
                               " is not a list")
6939 d1c2dd75 Iustin Pop
    self.out_data = rdict
6940 538475ca Iustin Pop
6941 538475ca Iustin Pop
6942 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests either by building the allocator
  input ("in" direction) or by actually executing a named allocator
  ("out" direction).

  """
  # required opcode attributes, checked by the framework
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation mode: the opcode must fully describe the new instance
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      # the instance to be "created" must not exist yet
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      # each NIC must be a dict with at least mac/ip/bridge keys
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      # each disk must be a dict with an integer size and a 'r'/'w' mode
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      # default the hypervisor to the cluster-wide one if unspecified
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      # relocation mode: the named instance must already exist
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      # canonicalize the name and remember the current secondaries as
      # the nodes to relocate away from
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    # the "out" direction additionally needs an allocator name to run
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    Returns either the generated allocator input text (direction "in")
    or the raw allocator output text (direction "out").

    """
    # build the IAllocator object with the arguments matching the mode
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      # only show what we would feed to the allocator
      result = ial.in_text
    else:
      # actually run the allocator; validation is skipped since this is
      # a test LU and we want to see the raw output
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result