#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import time
import tempfile
import re
import platform
import logging
import copy
import random

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring the
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing it separately is better because:

      - ExpandNames is left as a purely lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have the 'GANETI_' prefix, as this will
    be handled in the hooks runner. Also note that additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    "No nodes" should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


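# Illustrative sketch (hypothetical LU, not part of this module): a minimal
# concurrent LU following the rules documented in LogicalUnit above.  The
# opcode and its "node_name" parameter are invented for the example only, and
# NoHooksLU is the hook-less base class defined just below.
#
#   class LUExampleNodeInfo(NoHooksLU):
#     _OP_REQP = ["node_name"]
#     REQ_BGL = False
#
#     def ExpandNames(self):
#       # canonicalize the name and declare the single lock we need
#       expanded = self.cfg.ExpandNodeName(self.op.node_name)
#       if expanded is None:
#         raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name)
#       self.op.node_name = expanded
#       self.needed_locks = {locking.LEVEL_NODE: [expanded]}
#
#     def CheckPrereq(self):
#       self.node = self.cfg.GetNodeInfo(self.op.node_name)
#
#     def Exec(self, feedback_fn):
#       feedback_fn("Gathering information about node %s" % self.node.name)
#       return self.node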
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpProgrammerError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instances: list
  @param instances: list of instance names or None for all instances
  @rtype: list
  @return: the list of instances, sorted
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
  return wanted


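# Illustrative usage sketch for _GetWantedNodes/_GetWantedInstances (the
# opcode fields shown are hypothetical): an LU's CheckPrereq would typically
# expand user-supplied names with these helpers, e.g.
#
#   self.wanted_instances = _GetWantedInstances(self, self.op.instances)
#   # an empty/None "instances" value above means "all instances", while
#   # _GetWantedNodes requires a non-empty list of names
#   self.wanted_nodes = _GetWantedNodes(self, self.op.nodes)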
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set

  """
  f = utils.FieldSet()
  f.Extend(static)
  f.Extend(dynamic)

  delta = f.NonMatching(selected)
  if delta:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(delta))


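# Illustrative usage sketch for _CheckOutputFields (the field names are
# hypothetical): a query LU validates the requested output fields against the
# fields it can produce, e.g.
#
#   _CheckOutputFields(static=utils.FieldSet("name", "pinst_cnt"),
#                      dynamic=utils.FieldSet("dfree", "dtotal"),
#                      selected=self.op.output_fields)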
def _CheckBooleanOpField(op, name):
  """Validates boolean opcode parameters.

  This will ensure that an opcode parameter is either a boolean value,
  or None (but that it always exists).

  """
  val = getattr(op, name, None)
  if not (val is None or isinstance(val, bool)):
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
                               (name, str(val)))
  setattr(op, name, val)


def _CheckNodeOnline(lu, node):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is offline

  """
  if lu.cfg.GetNodeInfo(node).offline:
    raise errors.OpPrereqError("Can't use offline node %s" % node)


def _CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node)


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics, disk_template, disks):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: boolean
  @param status: the should_run status of the instance
  @type memory: string
  @param memory: the memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (ip, mac, mode, link) representing
      the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: the list of (size, mode) pairs
  @rtype: dict
  @return: the hook environment for this instance

  """
  if status:
    str_status = "up"
  else:
    str_status = "down"
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": str_status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, mac, mode, link) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (size, mode) in enumerate(disks):
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  return env

531 62f0dd02 Guido Trotter
  """Build a list of nic information tuples.
532 62f0dd02 Guido Trotter

533 62f0dd02 Guido Trotter
  This list is suitable to be passed to _BuildInstanceHookEnv.
534 62f0dd02 Guido Trotter

535 62f0dd02 Guido Trotter
  @type lu:  L{LogicalUnit}
536 62f0dd02 Guido Trotter
  @param lu: the logical unit on whose behalf we execute
537 62f0dd02 Guido Trotter
  @type nics: list of L{objects.NIC}
538 62f0dd02 Guido Trotter
  @param nics: list of nics to convert to hooks tuples
539 62f0dd02 Guido Trotter

540 62f0dd02 Guido Trotter
  """
541 62f0dd02 Guido Trotter
  hooks_nics = []
542 62f0dd02 Guido Trotter
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
543 62f0dd02 Guido Trotter
  for nic in nics:
544 62f0dd02 Guido Trotter
    ip = nic.ip
545 62f0dd02 Guido Trotter
    mac = nic.mac
546 62f0dd02 Guido Trotter
    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
547 62f0dd02 Guido Trotter
    mode = filled_params[constants.NIC_MODE]
548 62f0dd02 Guido Trotter
    link = filled_params[constants.NIC_LINK]
549 62f0dd02 Guido Trotter
    hooks_nics.append((ip, mac, mode, link))
550 62f0dd02 Guido Trotter
  return hooks_nics
551 396e1b78 Michael Hanselmann
552 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': _PreBuildNICHooksList(lu, instance.nics),
    'disk_template': instance.disk_template,
    'disks': [(disk.size, disk.mode) for disk in instance.disks],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


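# Illustrative usage sketch for _BuildInstanceHookEnvByObject (the override
# shown is hypothetical): an instance LU's BuildHooksEnv typically wraps this
# helper and returns the env together with the node lists for the hooks, e.g.
#
#   env = _BuildInstanceHookEnvByObject(self, self.instance,
#                                       override={"INSTANCE_STATUS": "down"})
#   nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
#   return env, nl, nl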
def _AdjustCandidatePool(lu):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool()
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               ", ".join(node.name for node in mod_list))
    for name in mod_list:
      lu.context.ReaddNode(name)
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def _CheckNicsBridgesExist(lu, target_nics, target_node,
                           profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  c_nicparams = lu.cfg.GetClusterInfo().nicparams[profile]
  paramslist = [objects.FillDict(c_nicparams, nic.nicparams)
                for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True)


def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, node)


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map, vg_name):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G
    if vg_name is not None:
      vglist = node_result.get(constants.NV_VGLIST, None)
      if not vglist:
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                        (node,))
        bad = True
      else:
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
          bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: non master-candidate has old/wrong file"
                        " '%s'" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    if vg_name is not None:
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
      if not isinstance(used_minors, (tuple, list)):
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
                    str(used_minors))
      else:
        for minor, (iname, must_exist) in drbd_map.items():
          if minor not in used_minors and must_exist:
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
                        " not active" % (minor, iname))
            bad = True
        for minor in used_minors:
          if minor not in drbd_map:
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
                        minor)
            bad = True

    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.admin_up:
      if ((node_current not in node_instance or
          not instance in node_instance[node_current]) and
          node_current not in n_offline):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

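  # Worked example for _VerifyNPlusOneMemory above (numbers are hypothetical):
  # if node A is secondary for two auto-balanced instances whose primary is
  # node B, with BE_MEMORY values of 512 and 1024, then needed_mem for the
  # (A, B) pair is 1536, and node A must report mfree >= 1536 for the cluster
  # to be considered N+1 safe against a failure of node B.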
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure causes
    their output to be logged in the verify output and the verification to
    fail.

    """
    all_nodes = self.cfg.GetNodeList()
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
      }
    for node in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())

    return env, [], all_nodes

  def Exec(self, feedback_fn):
960 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
961 a8083063 Iustin Pop

962 a8083063 Iustin Pop
    """
963 a8083063 Iustin Pop
    bad = False
964 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
965 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
966 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
967 a8083063 Iustin Pop
968 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
969 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
970 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
971 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
972 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
973 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
974 6d2e83d5 Iustin Pop
                        for iname in instancelist)
975 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
976 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
977 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
978 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
979 a8083063 Iustin Pop
    node_volume = {}
980 a8083063 Iustin Pop
    node_instance = {}
981 9c9c7d30 Guido Trotter
    node_info = {}
982 26b6af5e Guido Trotter
    instance_cfg = {}
983 a8083063 Iustin Pop
984 a8083063 Iustin Pop
    # FIXME: verify OS list
985 a8083063 Iustin Pop
    # do local checksums
986 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
987 112f18a5 Iustin Pop
988 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
989 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
990 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
991 112f18a5 Iustin Pop
    file_names.extend(master_files)
992 112f18a5 Iustin Pop
993 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
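    # local_checksums maps each file that exists locally to its fingerprint
    # on the master; the same files are checksummed on every node via
    # NV_FILELIST and compared against these values in _VerifyNode below.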
994 a8083063 Iustin Pop
995 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
996 a8083063 Iustin Pop
    node_verify_param = {
997 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
998 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
999 82e37788 Iustin Pop
                              if not node.offline],
1000 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1001 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1002 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1003 82e37788 Iustin Pop
                                 if not node.offline],
1004 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1005 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1006 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1007 a8083063 Iustin Pop
      }
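    # Each NV_* key asks the node daemon for one specific check; the value
    # carries that check's arguments (e.g. the files to fingerprint for
    # NV_FILELIST, or the peers to contact for NV_NODELIST/NV_NODENETTEST).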
1008 cc9e1230 Guido Trotter
    if vg_name is not None:
1009 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1010 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1011 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1012 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1013 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1014 a8083063 Iustin Pop
1015 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1016 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1017 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
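    # all_drbd_map is expected to map each node name to a dict of
    # {drbd minor: instance name} as recorded in the configuration; it is
    # cross-checked per node against the minors actually in use below.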
1018 6d2e83d5 Iustin Pop
1019 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1020 112f18a5 Iustin Pop
      node = node_i.name
1021 25361b9a Iustin Pop
1022 0a66c968 Iustin Pop
      if node_i.offline:
1023 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
1024 0a66c968 Iustin Pop
        n_offline.append(node)
1025 0a66c968 Iustin Pop
        continue
1026 0a66c968 Iustin Pop
1027 112f18a5 Iustin Pop
      if node == master_node:
1028 25361b9a Iustin Pop
        ntype = "master"
1029 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1030 25361b9a Iustin Pop
        ntype = "master candidate"
1031 22f0f71d Iustin Pop
      elif node_i.drained:
1032 22f0f71d Iustin Pop
        ntype = "drained"
1033 22f0f71d Iustin Pop
        n_drained.append(node)
1034 112f18a5 Iustin Pop
      else:
1035 25361b9a Iustin Pop
        ntype = "regular"
1036 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1037 25361b9a Iustin Pop
1038 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1039 6f68a739 Iustin Pop
      if msg:
1040 6f68a739 Iustin Pop
        feedback_fn("  - ERROR: while contacting node %s: %s" % (node, msg))
1041 25361b9a Iustin Pop
        bad = True
1042 25361b9a Iustin Pop
        continue
1043 25361b9a Iustin Pop
1044 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1045 6d2e83d5 Iustin Pop
      node_drbd = {}
1046 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1047 c614e5fb Iustin Pop
        if instance not in instanceinfo:
1048 c614e5fb Iustin Pop
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
1049 c614e5fb Iustin Pop
                      instance)
1050 c614e5fb Iustin Pop
          # ghost instance should not be running, but otherwise we
1051 c614e5fb Iustin Pop
          # don't give double warnings (both ghost instance and
1052 c614e5fb Iustin Pop
          # unallocated minor in use)
1053 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1054 c614e5fb Iustin Pop
        else:
1055 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1056 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
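      # node_drbd now maps every configured minor on this node to a
      # (instance name, instance should be running) pair; ghost entries
      # use False as explained above.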
1057 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
1058 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
1059 cc9e1230 Guido Trotter
                                node_drbd, vg_name)
1060 a8083063 Iustin Pop
      bad = bad or result
1061 a8083063 Iustin Pop
1062 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1063 cc9e1230 Guido Trotter
      if vg_name is None:
1064 cc9e1230 Guido Trotter
        node_volume[node] = {}
1065 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1066 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1067 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
1068 b63ed789 Iustin Pop
        bad = True
1069 b63ed789 Iustin Pop
        node_volume[node] = {}
1070 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1071 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1072 a8083063 Iustin Pop
        bad = True
1073 a8083063 Iustin Pop
        continue
1074 b63ed789 Iustin Pop
      else:
1075 25361b9a Iustin Pop
        node_volume[node] = lvdata
1076 a8083063 Iustin Pop
1077 a8083063 Iustin Pop
      # node_instance
1078 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1079 25361b9a Iustin Pop
      if not isinstance(idata, list):
1080 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1081 25361b9a Iustin Pop
                    (node,))
1082 a8083063 Iustin Pop
        bad = True
1083 a8083063 Iustin Pop
        continue
1084 a8083063 Iustin Pop
1085 25361b9a Iustin Pop
      node_instance[node] = idata
1086 a8083063 Iustin Pop
1087 9c9c7d30 Guido Trotter
      # node_info
1088 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1089 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1090 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1091 9c9c7d30 Guido Trotter
        bad = True
1092 9c9c7d30 Guido Trotter
        continue
1093 9c9c7d30 Guido Trotter
1094 9c9c7d30 Guido Trotter
      try:
1095 9c9c7d30 Guido Trotter
        node_info[node] = {
1096 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1097 93e4c50b Guido Trotter
          "pinst": [],
1098 93e4c50b Guido Trotter
          "sinst": [],
1099 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1100 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1101 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1102 36e7da50 Guido Trotter
          # current node as secondary.  This is handy to calculate N+1 memory
1103 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1104 36e7da50 Guido Trotter
          # secondary.
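          # Example shape (hypothetical names): if this node is secondary
          # for inst1 and inst2 (primary node1) and for inst3 (primary
          # node2), this becomes
          # {"node1": ["inst1", "inst2"], "node2": ["inst3"]}.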
1105 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1106 9c9c7d30 Guido Trotter
        }
1107 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1108 cc9e1230 Guido Trotter
        if vg_name is not None:
1109 9a198532 Iustin Pop
          if (constants.NV_VGLIST not in nresult or
1110 9a198532 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST]):
1111 9a198532 Iustin Pop
            feedback_fn("  - ERROR: node %s didn't return data for the"
1112 9a198532 Iustin Pop
                        " volume group '%s' - it is either missing or broken" %
1113 9a198532 Iustin Pop
                        (node, vg_name))
1114 9a198532 Iustin Pop
            bad = True
1115 9a198532 Iustin Pop
            continue
1116 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1117 9a198532 Iustin Pop
      except (ValueError, KeyError):
1118 9a198532 Iustin Pop
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
1119 9a198532 Iustin Pop
                    " from node %s" % (node,))
1120 9c9c7d30 Guido Trotter
        bad = True
1121 9c9c7d30 Guido Trotter
        continue
1122 9c9c7d30 Guido Trotter
1123 a8083063 Iustin Pop
    node_vol_should = {}
1124 a8083063 Iustin Pop
1125 a8083063 Iustin Pop
    for instance in instancelist:
1126 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1127 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1128 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1129 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1130 c5705f58 Guido Trotter
      bad = bad or result
1131 832261fd Iustin Pop
      inst_nodes_offline = []
1132 a8083063 Iustin Pop
1133 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1134 a8083063 Iustin Pop
1135 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1136 26b6af5e Guido Trotter
1137 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1138 93e4c50b Guido Trotter
      if pnode in node_info:
1139 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1140 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1141 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1142 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1143 93e4c50b Guido Trotter
        bad = True
1144 93e4c50b Guido Trotter
1145 832261fd Iustin Pop
      if pnode in n_offline:
1146 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1147 832261fd Iustin Pop
1148 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1149 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1150 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1151 93e4c50b Guido Trotter
      # supported either.
1152 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1153 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1154 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1155 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1156 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1157 93e4c50b Guido Trotter
                    % instance)
1158 93e4c50b Guido Trotter
1159 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1160 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1161 3924700f Iustin Pop
1162 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1163 93e4c50b Guido Trotter
        if snode in node_info:
1164 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1165 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1166 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1167 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1168 0a66c968 Iustin Pop
        elif snode not in n_offline:
1169 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1170 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1171 832261fd Iustin Pop
          bad = True
1172 832261fd Iustin Pop
        if snode in n_offline:
1173 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1174 832261fd Iustin Pop
1175 832261fd Iustin Pop
      if inst_nodes_offline:
1176 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1177 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1178 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1179 832261fd Iustin Pop
        bad = True
1180 93e4c50b Guido Trotter
1181 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1182 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1183 a8083063 Iustin Pop
                                       feedback_fn)
1184 a8083063 Iustin Pop
    bad = bad or result
1185 a8083063 Iustin Pop
1186 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1187 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1188 a8083063 Iustin Pop
                                         feedback_fn)
1189 a8083063 Iustin Pop
    bad = bad or result
1190 a8083063 Iustin Pop
1191 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1192 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1193 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1194 e54c4c5e Guido Trotter
      bad = bad or result
1195 2b3b6ddd Guido Trotter
1196 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1197 2b3b6ddd Guido Trotter
    if i_non_redundant:
1198 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1199 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1200 2b3b6ddd Guido Trotter
1201 3924700f Iustin Pop
    if i_non_a_balanced:
1202 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1203 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1204 3924700f Iustin Pop
1205 0a66c968 Iustin Pop
    if n_offline:
1206 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1207 0a66c968 Iustin Pop
1208 22f0f71d Iustin Pop
    if n_drained:
1209 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1210 22f0f71d Iustin Pop
1211 34290825 Michael Hanselmann
    return not bad
1212 a8083063 Iustin Pop
1213 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
1214 e4376078 Iustin Pop
    """Analize the post-hooks' result
1215 e4376078 Iustin Pop

1216 e4376078 Iustin Pop
    This method analyses the hook result, handles it, and sends some
1217 d8fff41c Guido Trotter
    nicely-formatted feedback back to the user.
1218 d8fff41c Guido Trotter

1219 e4376078 Iustin Pop
    @param phase: one of L{constants.HOOKS_PHASE_POST} or
1220 e4376078 Iustin Pop
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
1221 e4376078 Iustin Pop
    @param hooks_results: the results of the multi-node hooks rpc call
1222 e4376078 Iustin Pop
    @param feedback_fn: function used to send feedback back to the caller
1223 e4376078 Iustin Pop
    @param lu_result: previous Exec result
1224 e4376078 Iustin Pop
    @return: the new Exec result, based on the previous result
1225 e4376078 Iustin Pop
        and hook results
1226 d8fff41c Guido Trotter

1227 d8fff41c Guido Trotter
    """
1228 38206f3c Iustin Pop
    # We only really run POST phase hooks, and are only interested in
1229 38206f3c Iustin Pop
    # their results
1230 d8fff41c Guido Trotter
    if phase == constants.HOOKS_PHASE_POST:
1231 d8fff41c Guido Trotter
      # Used to change hooks' output to proper indentation
1232 d8fff41c Guido Trotter
      indent_re = re.compile('^', re.M)
1233 d8fff41c Guido Trotter
      feedback_fn("* Hooks Results")
1234 d8fff41c Guido Trotter
      if not hooks_results:
1235 d8fff41c Guido Trotter
        feedback_fn("  - ERROR: general communication failure")
1236 d8fff41c Guido Trotter
        lu_result = 1
1237 d8fff41c Guido Trotter
      else:
1238 d8fff41c Guido Trotter
        for node_name in hooks_results:
1239 d8fff41c Guido Trotter
          show_node_header = True
1240 d8fff41c Guido Trotter
          res = hooks_results[node_name]
1241 4c4e4e1e Iustin Pop
          msg = res.fail_msg
1242 3fb4f740 Iustin Pop
          if msg:
1243 0a66c968 Iustin Pop
            if res.offline:
1244 0a66c968 Iustin Pop
              # no need to warn or set fail return value
1245 0a66c968 Iustin Pop
              continue
1246 3fb4f740 Iustin Pop
            feedback_fn("    Communication failure in hooks execution: %s" %
1247 3fb4f740 Iustin Pop
                        msg)
1248 d8fff41c Guido Trotter
            lu_result = 1
1249 d8fff41c Guido Trotter
            continue
1250 3fb4f740 Iustin Pop
          for script, hkr, output in res.payload:
1251 d8fff41c Guido Trotter
            if hkr == constants.HKR_FAIL:
1252 d8fff41c Guido Trotter
              # The node header is only shown once, if there are
1253 d8fff41c Guido Trotter
              # failing hooks on that node
1254 d8fff41c Guido Trotter
              if show_node_header:
1255 d8fff41c Guido Trotter
                feedback_fn("  Node %s:" % node_name)
1256 d8fff41c Guido Trotter
                show_node_header = False
1257 d8fff41c Guido Trotter
              feedback_fn("    ERROR: Script %s failed, output:" % script)
1258 d8fff41c Guido Trotter
              output = indent_re.sub('      ', output)
1259 d8fff41c Guido Trotter
              feedback_fn("%s" % output)
1260 d8fff41c Guido Trotter
              lu_result = 1
1261 d8fff41c Guido Trotter
1262 d8fff41c Guido Trotter
      return lu_result
1263 d8fff41c Guido Trotter
1264 a8083063 Iustin Pop
1265 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
1266 2c95a8d4 Iustin Pop
  """Verifies the cluster disks status.
1267 2c95a8d4 Iustin Pop

1268 2c95a8d4 Iustin Pop
  """
1269 2c95a8d4 Iustin Pop
  _OP_REQP = []
1270 d4b9d97f Guido Trotter
  REQ_BGL = False
1271 d4b9d97f Guido Trotter
1272 d4b9d97f Guido Trotter
  def ExpandNames(self):
1273 d4b9d97f Guido Trotter
    self.needed_locks = {
1274 d4b9d97f Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1275 d4b9d97f Guido Trotter
      locking.LEVEL_INSTANCE: locking.ALL_SET,
1276 d4b9d97f Guido Trotter
    }
1277 d4b9d97f Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
1278 2c95a8d4 Iustin Pop
1279 2c95a8d4 Iustin Pop
  def CheckPrereq(self):
1280 2c95a8d4 Iustin Pop
    """Check prerequisites.
1281 2c95a8d4 Iustin Pop

1282 2c95a8d4 Iustin Pop
    This has no prerequisites.
1283 2c95a8d4 Iustin Pop

1284 2c95a8d4 Iustin Pop
    """
1285 2c95a8d4 Iustin Pop
    pass
1286 2c95a8d4 Iustin Pop
1287 2c95a8d4 Iustin Pop
  def Exec(self, feedback_fn):
1288 2c95a8d4 Iustin Pop
    """Verify integrity of cluster disks.
1289 2c95a8d4 Iustin Pop

1290 29d376ec Iustin Pop
    @rtype: tuple of three items
1291 29d376ec Iustin Pop
    @return: a tuple of (dict of node-to-node_error, list of instances
1292 29d376ec Iustin Pop
        which need activate-disks, dict of instance: (node, volume) for
1293 29d376ec Iustin Pop
        missing volumes)
1294 29d376ec Iustin Pop

1295 2c95a8d4 Iustin Pop
    """
1296 29d376ec Iustin Pop
    result = res_nodes, res_instances, res_missing = {}, [], {}
1297 2c95a8d4 Iustin Pop
1298 2c95a8d4 Iustin Pop
    vg_name = self.cfg.GetVGName()
1299 2c95a8d4 Iustin Pop
    nodes = utils.NiceSort(self.cfg.GetNodeList())
1300 2c95a8d4 Iustin Pop
    instances = [self.cfg.GetInstanceInfo(name)
1301 2c95a8d4 Iustin Pop
                 for name in self.cfg.GetInstanceList()]
1302 2c95a8d4 Iustin Pop
1303 2c95a8d4 Iustin Pop
    nv_dict = {}
1304 2c95a8d4 Iustin Pop
    for inst in instances:
1305 2c95a8d4 Iustin Pop
      inst_lvs = {}
1306 0d68c45d Iustin Pop
      if (not inst.admin_up or
1307 2c95a8d4 Iustin Pop
          inst.disk_template not in constants.DTS_NET_MIRROR):
1308 2c95a8d4 Iustin Pop
        continue
1309 2c95a8d4 Iustin Pop
      inst.MapLVsByNode(inst_lvs)
1310 2c95a8d4 Iustin Pop
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
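      # e.g. (hypothetical): {"inst1": {"node1": ["xenvg/lv1"]}} becomes
      # {("node1", "xenvg/lv1"): <inst1 Instance object>}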
1311 2c95a8d4 Iustin Pop
      for node, vol_list in inst_lvs.iteritems():
1312 2c95a8d4 Iustin Pop
        for vol in vol_list:
1313 2c95a8d4 Iustin Pop
          nv_dict[(node, vol)] = inst
1314 2c95a8d4 Iustin Pop
1315 2c95a8d4 Iustin Pop
    if not nv_dict:
1316 2c95a8d4 Iustin Pop
      return result
1317 2c95a8d4 Iustin Pop
1318 72737a7f Iustin Pop
    node_lvs = self.rpc.call_volume_list(nodes, vg_name)
1319 2c95a8d4 Iustin Pop
1320 2c95a8d4 Iustin Pop
    to_act = set()
1321 2c95a8d4 Iustin Pop
    for node in nodes:
1322 2c95a8d4 Iustin Pop
      # node_volume
1323 29d376ec Iustin Pop
      node_res = node_lvs[node]
1324 29d376ec Iustin Pop
      if node_res.offline:
1325 ea9ddc07 Iustin Pop
        continue
1326 4c4e4e1e Iustin Pop
      msg = node_res.fail_msg
1327 29d376ec Iustin Pop
      if msg:
1328 29d376ec Iustin Pop
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
1329 29d376ec Iustin Pop
        res_nodes[node] = msg
1330 2c95a8d4 Iustin Pop
        continue
1331 2c95a8d4 Iustin Pop
1332 29d376ec Iustin Pop
      lvs = node_res.payload
1333 29d376ec Iustin Pop
      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
1334 b63ed789 Iustin Pop
        inst = nv_dict.pop((node, lv_name), None)
1335 b63ed789 Iustin Pop
        if (not lv_online and inst is not None
1336 b63ed789 Iustin Pop
            and inst.name not in res_instances):
1337 b08d5a87 Iustin Pop
          res_instances.append(inst.name)
1338 2c95a8d4 Iustin Pop
1339 b63ed789 Iustin Pop
    # any leftover items in nv_dict are missing LVs, let's arrange the
1340 b63ed789 Iustin Pop
    # data better
1341 b63ed789 Iustin Pop
    for key, inst in nv_dict.iteritems():
1342 b63ed789 Iustin Pop
      if inst.name not in res_missing:
1343 b63ed789 Iustin Pop
        res_missing[inst.name] = []
1344 b63ed789 Iustin Pop
      res_missing[inst.name].append(key)
1345 b63ed789 Iustin Pop
1346 2c95a8d4 Iustin Pop
    return result
1347 2c95a8d4 Iustin Pop
1348 2c95a8d4 Iustin Pop
1349 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
1350 07bd8a51 Iustin Pop
  """Rename the cluster.
1351 07bd8a51 Iustin Pop

1352 07bd8a51 Iustin Pop
  """
1353 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
1354 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1355 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
1356 07bd8a51 Iustin Pop
1357 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
1358 07bd8a51 Iustin Pop
    """Build hooks env.
1359 07bd8a51 Iustin Pop

1360 07bd8a51 Iustin Pop
    """
1361 07bd8a51 Iustin Pop
    env = {
1362 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1363 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
1364 07bd8a51 Iustin Pop
      }
1365 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1366 07bd8a51 Iustin Pop
    return env, [mn], [mn]
1367 07bd8a51 Iustin Pop
1368 07bd8a51 Iustin Pop
  def CheckPrereq(self):
1369 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
1370 07bd8a51 Iustin Pop

1371 07bd8a51 Iustin Pop
    """
1372 89e1fc26 Iustin Pop
    hostname = utils.HostInfo(self.op.name)
1373 07bd8a51 Iustin Pop
1374 bcf043c9 Iustin Pop
    new_name = hostname.name
1375 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
1376 d6a02168 Michael Hanselmann
    old_name = self.cfg.GetClusterName()
1377 d6a02168 Michael Hanselmann
    old_ip = self.cfg.GetMasterIP()
1378 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
1379 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
1380 07bd8a51 Iustin Pop
                                 " cluster has changed")
1381 07bd8a51 Iustin Pop
    if new_ip != old_ip:
1382 937f983d Guido Trotter
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
1383 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
1384 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
1385 07bd8a51 Iustin Pop
                                   new_ip)
1386 07bd8a51 Iustin Pop
1387 07bd8a51 Iustin Pop
    self.op.name = new_name
1388 07bd8a51 Iustin Pop
1389 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
1390 07bd8a51 Iustin Pop
    """Rename the cluster.
1391 07bd8a51 Iustin Pop

1392 07bd8a51 Iustin Pop
    """
1393 07bd8a51 Iustin Pop
    clustername = self.op.name
1394 07bd8a51 Iustin Pop
    ip = self.ip
1395 07bd8a51 Iustin Pop
1396 07bd8a51 Iustin Pop
    # shutdown the master IP
1397 d6a02168 Michael Hanselmann
    master = self.cfg.GetMasterNode()
1398 781de953 Iustin Pop
    result = self.rpc.call_node_stop_master(master, False)
1399 4c4e4e1e Iustin Pop
    result.Raise("Could not disable the master role")
1400 07bd8a51 Iustin Pop
1401 07bd8a51 Iustin Pop
    try:
1402 55cf7d83 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
1403 55cf7d83 Iustin Pop
      cluster.cluster_name = clustername
1404 55cf7d83 Iustin Pop
      cluster.master_ip = ip
1405 55cf7d83 Iustin Pop
      self.cfg.Update(cluster)
1406 ec85e3d5 Iustin Pop
1407 ec85e3d5 Iustin Pop
      # update the known hosts file
1408 ec85e3d5 Iustin Pop
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
1409 ec85e3d5 Iustin Pop
      node_list = self.cfg.GetNodeList()
1410 ec85e3d5 Iustin Pop
      try:
1411 ec85e3d5 Iustin Pop
        node_list.remove(master)
1412 ec85e3d5 Iustin Pop
      except ValueError:
1413 ec85e3d5 Iustin Pop
        pass
1414 ec85e3d5 Iustin Pop
      result = self.rpc.call_upload_file(node_list,
1415 ec85e3d5 Iustin Pop
                                         constants.SSH_KNOWN_HOSTS_FILE)
1416 ec85e3d5 Iustin Pop
      for to_node, to_result in result.iteritems():
1417 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
1418 6f7d4e75 Iustin Pop
        if msg:
1419 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
1420 6f7d4e75 Iustin Pop
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
1421 6f7d4e75 Iustin Pop
          self.proc.LogWarning(msg)
1422 ec85e3d5 Iustin Pop
1423 07bd8a51 Iustin Pop
    finally:
1424 781de953 Iustin Pop
      result = self.rpc.call_node_start_master(master, False)
1425 4c4e4e1e Iustin Pop
      msg = result.fail_msg
1426 b726aff0 Iustin Pop
      if msg:
1427 86d9d3bb Iustin Pop
        self.LogWarning("Could not re-enable the master role on"
1428 b726aff0 Iustin Pop
                        " the master, please restart manually: %s", msg)
1429 07bd8a51 Iustin Pop
1430 07bd8a51 Iustin Pop
1431 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
1432 8084f9f6 Manuel Franceschini
  """Check if the given disk or its children are lvm-based.
1433 8084f9f6 Manuel Franceschini

1434 e4376078 Iustin Pop
  @type disk: L{objects.Disk}
1435 e4376078 Iustin Pop
  @param disk: the disk to check
1436 e4376078 Iustin Pop
  @rtype: boolean
1437 e4376078 Iustin Pop
  @return: boolean indicating whether a LD_LV dev_type was found or not
1438 8084f9f6 Manuel Franceschini

1439 8084f9f6 Manuel Franceschini
  """
1440 8084f9f6 Manuel Franceschini
  if disk.children:
1441 8084f9f6 Manuel Franceschini
    for chdisk in disk.children:
1442 8084f9f6 Manuel Franceschini
      if _RecursiveCheckIfLVMBased(chdisk):
1443 8084f9f6 Manuel Franceschini
        return True
1444 8084f9f6 Manuel Franceschini
  return disk.dev_type == constants.LD_LV
1445 8084f9f6 Manuel Franceschini
1446 8084f9f6 Manuel Franceschini
1447 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
1448 8084f9f6 Manuel Franceschini
  """Change the parameters of the cluster.
1449 8084f9f6 Manuel Franceschini

1450 8084f9f6 Manuel Franceschini
  """
1451 8084f9f6 Manuel Franceschini
  HPATH = "cluster-modify"
1452 8084f9f6 Manuel Franceschini
  HTYPE = constants.HTYPE_CLUSTER
1453 8084f9f6 Manuel Franceschini
  _OP_REQP = []
1454 c53279cf Guido Trotter
  REQ_BGL = False
1455 c53279cf Guido Trotter
1456 3994f455 Iustin Pop
  def CheckArguments(self):
1457 4b7735f9 Iustin Pop
    """Check parameters
1458 4b7735f9 Iustin Pop

1459 4b7735f9 Iustin Pop
    """
1460 4b7735f9 Iustin Pop
    if not hasattr(self.op, "candidate_pool_size"):
1461 4b7735f9 Iustin Pop
      self.op.candidate_pool_size = None
1462 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1463 4b7735f9 Iustin Pop
      try:
1464 4b7735f9 Iustin Pop
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
1465 3994f455 Iustin Pop
      except (ValueError, TypeError), err:
1466 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
1467 4b7735f9 Iustin Pop
                                   str(err))
1468 4b7735f9 Iustin Pop
      if self.op.candidate_pool_size < 1:
1469 4b7735f9 Iustin Pop
        raise errors.OpPrereqError("At least one master candidate needed")
1470 4b7735f9 Iustin Pop
1471 c53279cf Guido Trotter
  def ExpandNames(self):
1472 c53279cf Guido Trotter
    # FIXME: in the future maybe other cluster params won't require checking on
1473 c53279cf Guido Trotter
    # all nodes to be modified.
1474 c53279cf Guido Trotter
    self.needed_locks = {
1475 c53279cf Guido Trotter
      locking.LEVEL_NODE: locking.ALL_SET,
1476 c53279cf Guido Trotter
    }
1477 c53279cf Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1478 8084f9f6 Manuel Franceschini
1479 8084f9f6 Manuel Franceschini
  def BuildHooksEnv(self):
1480 8084f9f6 Manuel Franceschini
    """Build hooks env.
1481 8084f9f6 Manuel Franceschini

1482 8084f9f6 Manuel Franceschini
    """
1483 8084f9f6 Manuel Franceschini
    env = {
1484 d6a02168 Michael Hanselmann
      "OP_TARGET": self.cfg.GetClusterName(),
1485 8084f9f6 Manuel Franceschini
      "NEW_VG_NAME": self.op.vg_name,
1486 8084f9f6 Manuel Franceschini
      }
1487 d6a02168 Michael Hanselmann
    mn = self.cfg.GetMasterNode()
1488 8084f9f6 Manuel Franceschini
    return env, [mn], [mn]
1489 8084f9f6 Manuel Franceschini
1490 8084f9f6 Manuel Franceschini
  def CheckPrereq(self):
1491 8084f9f6 Manuel Franceschini
    """Check prerequisites.
1492 8084f9f6 Manuel Franceschini

1493 8084f9f6 Manuel Franceschini
    This checks whether the given params don't conflict and
1494 5f83e263 Iustin Pop
    if the given volume group is valid.
1495 8084f9f6 Manuel Franceschini

1496 8084f9f6 Manuel Franceschini
    """
1497 779c15bb Iustin Pop
    if self.op.vg_name is not None and not self.op.vg_name:
1498 c53279cf Guido Trotter
      instances = self.cfg.GetAllInstancesInfo().values()
1499 8084f9f6 Manuel Franceschini
      for inst in instances:
1500 8084f9f6 Manuel Franceschini
        for disk in inst.disks:
1501 8084f9f6 Manuel Franceschini
          if _RecursiveCheckIfLVMBased(disk):
1502 8084f9f6 Manuel Franceschini
            raise errors.OpPrereqError("Cannot disable lvm storage while"
1503 8084f9f6 Manuel Franceschini
                                       " lvm-based instances exist")
1504 8084f9f6 Manuel Franceschini
1505 779c15bb Iustin Pop
    node_list = self.acquired_locks[locking.LEVEL_NODE]
1506 779c15bb Iustin Pop
1507 8084f9f6 Manuel Franceschini
    # if vg_name not None, checks given volume group on all nodes
1508 8084f9f6 Manuel Franceschini
    if self.op.vg_name:
1509 72737a7f Iustin Pop
      vglist = self.rpc.call_vg_list(node_list)
1510 8084f9f6 Manuel Franceschini
      for node in node_list:
1511 4c4e4e1e Iustin Pop
        msg = vglist[node].fail_msg
1512 e480923b Iustin Pop
        if msg:
1513 781de953 Iustin Pop
          # ignoring down node
1514 e480923b Iustin Pop
          self.LogWarning("Error while gathering data on node %s"
1515 e480923b Iustin Pop
                          " (ignoring node): %s", node, msg)
1516 781de953 Iustin Pop
          continue
1517 e480923b Iustin Pop
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
1518 781de953 Iustin Pop
                                              self.op.vg_name,
1519 8d1a2a64 Michael Hanselmann
                                              constants.MIN_VG_SIZE)
1520 8084f9f6 Manuel Franceschini
        if vgstatus:
1521 8084f9f6 Manuel Franceschini
          raise errors.OpPrereqError("Error on node '%s': %s" %
1522 8084f9f6 Manuel Franceschini
                                     (node, vgstatus))
1523 8084f9f6 Manuel Franceschini
1524 779c15bb Iustin Pop
    self.cluster = cluster = self.cfg.GetClusterInfo()
1525 5af3da74 Guido Trotter
    # validate params changes
1526 779c15bb Iustin Pop
    if self.op.beparams:
1527 a5728081 Guido Trotter
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
1528 abe609b2 Guido Trotter
      self.new_beparams = objects.FillDict(
1529 4ef7f423 Guido Trotter
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)
1530 779c15bb Iustin Pop
1531 5af3da74 Guido Trotter
    if self.op.nicparams:
1532 5af3da74 Guido Trotter
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
1533 5af3da74 Guido Trotter
      self.new_nicparams = objects.FillDict(
1534 5af3da74 Guido Trotter
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
1535 5af3da74 Guido Trotter
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
1536 5af3da74 Guido Trotter
1537 779c15bb Iustin Pop
    # hypervisor list/parameters
1538 abe609b2 Guido Trotter
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
1539 779c15bb Iustin Pop
    if self.op.hvparams:
1540 779c15bb Iustin Pop
      if not isinstance(self.op.hvparams, dict):
1541 779c15bb Iustin Pop
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
1542 779c15bb Iustin Pop
      for hv_name, hv_dict in self.op.hvparams.items():
1543 779c15bb Iustin Pop
        if hv_name not in self.new_hvparams:
1544 779c15bb Iustin Pop
          self.new_hvparams[hv_name] = hv_dict
1545 779c15bb Iustin Pop
        else:
1546 779c15bb Iustin Pop
          self.new_hvparams[hv_name].update(hv_dict)
1547 779c15bb Iustin Pop
1548 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1549 779c15bb Iustin Pop
      self.hv_list = self.op.enabled_hypervisors
1550 779c15bb Iustin Pop
    else:
1551 779c15bb Iustin Pop
      self.hv_list = cluster.enabled_hypervisors
1552 779c15bb Iustin Pop
1553 779c15bb Iustin Pop
    if self.op.hvparams or self.op.enabled_hypervisors is not None:
1554 779c15bb Iustin Pop
      # either the enabled list has changed, or the parameters have, validate
1555 779c15bb Iustin Pop
      for hv_name, hv_params in self.new_hvparams.items():
1556 779c15bb Iustin Pop
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
1557 779c15bb Iustin Pop
            (self.op.enabled_hypervisors and
1558 779c15bb Iustin Pop
             hv_name in self.op.enabled_hypervisors)):
1559 779c15bb Iustin Pop
          # either this is a new hypervisor, or its parameters have changed
1560 779c15bb Iustin Pop
          hv_class = hypervisor.GetHypervisor(hv_name)
1561 a5728081 Guido Trotter
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
1562 779c15bb Iustin Pop
          hv_class.CheckParameterSyntax(hv_params)
1563 779c15bb Iustin Pop
          _CheckHVParams(self, node_list, hv_name, hv_params)
1564 779c15bb Iustin Pop
1565 8084f9f6 Manuel Franceschini
  def Exec(self, feedback_fn):
1566 8084f9f6 Manuel Franceschini
    """Change the parameters of the cluster.
1567 8084f9f6 Manuel Franceschini

1568 8084f9f6 Manuel Franceschini
    """
1569 779c15bb Iustin Pop
    if self.op.vg_name is not None:
1570 b2482333 Guido Trotter
      new_volume = self.op.vg_name
1571 b2482333 Guido Trotter
      if not new_volume:
1572 b2482333 Guido Trotter
        new_volume = None
1573 b2482333 Guido Trotter
      if new_volume != self.cfg.GetVGName():
1574 b2482333 Guido Trotter
        self.cfg.SetVGName(new_volume)
1575 779c15bb Iustin Pop
      else:
1576 779c15bb Iustin Pop
        feedback_fn("Cluster LVM configuration already in desired"
1577 779c15bb Iustin Pop
                    " state, not changing")
1578 779c15bb Iustin Pop
    if self.op.hvparams:
1579 779c15bb Iustin Pop
      self.cluster.hvparams = self.new_hvparams
1580 779c15bb Iustin Pop
    if self.op.enabled_hypervisors is not None:
1581 779c15bb Iustin Pop
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
1582 779c15bb Iustin Pop
    if self.op.beparams:
1583 4ef7f423 Guido Trotter
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
1584 5af3da74 Guido Trotter
    if self.op.nicparams:
1585 5af3da74 Guido Trotter
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
1586 5af3da74 Guido Trotter
1587 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1588 4b7735f9 Iustin Pop
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
1589 4b7735f9 Iustin Pop
1590 779c15bb Iustin Pop
    self.cfg.Update(self.cluster)
1591 8084f9f6 Manuel Franceschini
1592 4b7735f9 Iustin Pop
    # we want to update nodes after the cluster so that if any errors
1593 4b7735f9 Iustin Pop
    # happen, we have recorded and saved the cluster info
1594 4b7735f9 Iustin Pop
    if self.op.candidate_pool_size is not None:
1595 ec0292f1 Iustin Pop
      _AdjustCandidatePool(self)
1596 4b7735f9 Iustin Pop
1597 8084f9f6 Manuel Franceschini
1598 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
1599 28eddce5 Guido Trotter
  """Distribute additional files which are part of the cluster configuration.
1600 28eddce5 Guido Trotter

1601 28eddce5 Guido Trotter
  ConfigWriter takes care of distributing the config and ssconf files, but
1602 28eddce5 Guido Trotter
  there are more files which should be distributed to all nodes. This function
1603 28eddce5 Guido Trotter
  makes sure those are copied.
1604 28eddce5 Guido Trotter

1605 28eddce5 Guido Trotter
  @param lu: calling logical unit
1606 28eddce5 Guido Trotter
  @param additional_nodes: list of nodes not in the config to distribute to
1607 28eddce5 Guido Trotter

1608 28eddce5 Guido Trotter
  """
1609 28eddce5 Guido Trotter
  # 1. Gather target nodes
1610 28eddce5 Guido Trotter
  myself = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
1611 28eddce5 Guido Trotter
  dist_nodes = lu.cfg.GetNodeList()
1612 28eddce5 Guido Trotter
  if additional_nodes is not None:
1613 28eddce5 Guido Trotter
    dist_nodes.extend(additional_nodes)
1614 28eddce5 Guido Trotter
  if myself.name in dist_nodes:
1615 28eddce5 Guido Trotter
    dist_nodes.remove(myself.name)
1616 28eddce5 Guido Trotter
  # 2. Gather files to distribute
1617 28eddce5 Guido Trotter
  dist_files = set([constants.ETC_HOSTS,
1618 28eddce5 Guido Trotter
                    constants.SSH_KNOWN_HOSTS_FILE,
1619 28eddce5 Guido Trotter
                    constants.RAPI_CERT_FILE,
1620 28eddce5 Guido Trotter
                    constants.RAPI_USERS_FILE,
1621 28eddce5 Guido Trotter
                   ])
1622 e1b8653f Guido Trotter
1623 e1b8653f Guido Trotter
  enabled_hypervisors = lu.cfg.GetClusterInfo().enabled_hypervisors
1624 e1b8653f Guido Trotter
  for hv_name in enabled_hypervisors:
1625 e1b8653f Guido Trotter
    hv_class = hypervisor.GetHypervisor(hv_name)
1626 e1b8653f Guido Trotter
    dist_files.update(hv_class.GetAncillaryFiles())
1627 e1b8653f Guido Trotter
1628 28eddce5 Guido Trotter
  # 3. Perform the files upload
1629 28eddce5 Guido Trotter
  for fname in dist_files:
1630 28eddce5 Guido Trotter
    if os.path.exists(fname):
1631 28eddce5 Guido Trotter
      result = lu.rpc.call_upload_file(dist_nodes, fname)
1632 28eddce5 Guido Trotter
      for to_node, to_result in result.items():
1633 6f7d4e75 Iustin Pop
        msg = to_result.fail_msg
1634 6f7d4e75 Iustin Pop
        if msg:
1635 6f7d4e75 Iustin Pop
          msg = ("Copy of file %s to node %s failed: %s" %
1636 6f7d4e75 Iustin Pop
                 (fname, to_node, msg))
1637 6f7d4e75 Iustin Pop
          lu.proc.LogWarning(msg)
1638 28eddce5 Guido Trotter
1639 28eddce5 Guido Trotter
1640 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
1641 afee0879 Iustin Pop
  """Force the redistribution of cluster configuration.
1642 afee0879 Iustin Pop

1643 afee0879 Iustin Pop
  This is a very simple LU.
1644 afee0879 Iustin Pop

1645 afee0879 Iustin Pop
  """
1646 afee0879 Iustin Pop
  _OP_REQP = []
1647 afee0879 Iustin Pop
  REQ_BGL = False
1648 afee0879 Iustin Pop
1649 afee0879 Iustin Pop
  def ExpandNames(self):
1650 afee0879 Iustin Pop
    self.needed_locks = {
1651 afee0879 Iustin Pop
      locking.LEVEL_NODE: locking.ALL_SET,
1652 afee0879 Iustin Pop
    }
1653 afee0879 Iustin Pop
    self.share_locks[locking.LEVEL_NODE] = 1
1654 afee0879 Iustin Pop
1655 afee0879 Iustin Pop
  def CheckPrereq(self):
1656 afee0879 Iustin Pop
    """Check prerequisites.
1657 afee0879 Iustin Pop

1658 afee0879 Iustin Pop
    """
1659 afee0879 Iustin Pop
1660 afee0879 Iustin Pop
  def Exec(self, feedback_fn):
1661 afee0879 Iustin Pop
    """Redistribute the configuration.
1662 afee0879 Iustin Pop

1663 afee0879 Iustin Pop
    """
1664 afee0879 Iustin Pop
    self.cfg.Update(self.cfg.GetClusterInfo())
1665 28eddce5 Guido Trotter
    _RedistributeAncillaryFiles(self)
1666 afee0879 Iustin Pop
1667 afee0879 Iustin Pop
1668 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
1669 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1670 a8083063 Iustin Pop

1671 a8083063 Iustin Pop
  """
1672 a8083063 Iustin Pop
  if not instance.disks:
1673 a8083063 Iustin Pop
    return True
1674 a8083063 Iustin Pop
1675 a8083063 Iustin Pop
  if not oneshot:
1676 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1677 a8083063 Iustin Pop
1678 a8083063 Iustin Pop
  node = instance.primary_node
1679 a8083063 Iustin Pop
1680 a8083063 Iustin Pop
  for dev in instance.disks:
1681 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(dev, node)
1682 a8083063 Iustin Pop
1683 a8083063 Iustin Pop
  retries = 0
1684 a8083063 Iustin Pop
  while True:
1685 a8083063 Iustin Pop
    max_time = 0
1686 a8083063 Iustin Pop
    done = True
1687 a8083063 Iustin Pop
    cumul_degraded = False
1688 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1689 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
1690 3efa9051 Iustin Pop
    if msg:
1691 3efa9051 Iustin Pop
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
1692 a8083063 Iustin Pop
      retries += 1
1693 a8083063 Iustin Pop
      if retries >= 10:
1694 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1695 3ecf6786 Iustin Pop
                                 " aborting." % node)
1696 a8083063 Iustin Pop
      time.sleep(6)
1697 a8083063 Iustin Pop
      continue
1698 3efa9051 Iustin Pop
    rstats = rstats.payload
1699 a8083063 Iustin Pop
    retries = 0
1700 1492cca7 Iustin Pop
    for i, mstat in enumerate(rstats):
1701 a8083063 Iustin Pop
      if mstat is None:
1702 86d9d3bb Iustin Pop
        lu.LogWarning("Can't compute data for node %s/%s",
1703 86d9d3bb Iustin Pop
                           node, instance.disks[i].iv_name)
1704 a8083063 Iustin Pop
        continue
1705 0834c866 Iustin Pop
      # we ignore the ldisk parameter
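      # mstat is assumed to be a (sync_percent, estimated_time,
      # is_degraded, ldisk) tuple from blockdev_getmirrorstatus;
      # sync_percent is None once the device is fully synced.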
1706 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1707 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1708 a8083063 Iustin Pop
      if perc_done is not None:
1709 a8083063 Iustin Pop
        done = False
1710 a8083063 Iustin Pop
        if est_time is not None:
1711 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1712 a8083063 Iustin Pop
          max_time = est_time
1713 a8083063 Iustin Pop
        else:
1714 a8083063 Iustin Pop
          rem_time = "no time estimate"
1715 b9bddb6b Iustin Pop
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
1716 b9bddb6b Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1717 a8083063 Iustin Pop
    if done or oneshot:
1718 a8083063 Iustin Pop
      break
1719 a8083063 Iustin Pop
1720 d4fa5c23 Iustin Pop
    time.sleep(min(60, max_time))
1721 a8083063 Iustin Pop
1722 a8083063 Iustin Pop
  if done:
1723 b9bddb6b Iustin Pop
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1724 a8083063 Iustin Pop
  return not cumul_degraded
1725 a8083063 Iustin Pop
1726 a8083063 Iustin Pop
1727 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1728 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1729 a8083063 Iustin Pop

1730 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1731 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1732 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1733 0834c866 Iustin Pop

1734 a8083063 Iustin Pop
  """
1735 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1736 0834c866 Iustin Pop
  if ldisk:
1737 0834c866 Iustin Pop
    idx = 6
1738 0834c866 Iustin Pop
  else:
1739 0834c866 Iustin Pop
    idx = 5
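  # idx is assumed to select the field of the blockdev_find result tuple
  # checked below via rstats.payload[idx]: the overall is_degraded flag,
  # or the local-disk (ldisk) status when ldisk=True.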
1740 a8083063 Iustin Pop
1741 a8083063 Iustin Pop
  result = True
1742 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1743 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1744 4c4e4e1e Iustin Pop
    msg = rstats.fail_msg
1745 23829f6f Iustin Pop
    if msg:
1746 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
1747 23829f6f Iustin Pop
      result = False
1748 23829f6f Iustin Pop
    elif not rstats.payload:
1749 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
1750 a8083063 Iustin Pop
      result = False
1751 a8083063 Iustin Pop
    else:
1752 23829f6f Iustin Pop
      result = result and (not rstats.payload[idx])
1753 a8083063 Iustin Pop
  if dev.children:
1754 a8083063 Iustin Pop
    for child in dev.children:
1755 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1756 a8083063 Iustin Pop
1757 a8083063 Iustin Pop
  return result
1758 a8083063 Iustin Pop
1759 a8083063 Iustin Pop
1760 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1761 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1762 a8083063 Iustin Pop

1763 a8083063 Iustin Pop
  """
1764 1f9430d6 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1765 6bf01bbb Guido Trotter
  REQ_BGL = False
1766 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet()
1767 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")
1768 a8083063 Iustin Pop
1769 6bf01bbb Guido Trotter
  def ExpandNames(self):
1770 1f9430d6 Iustin Pop
    if self.op.names:
1771 1f9430d6 Iustin Pop
      raise errors.OpPrereqError("Selective OS query not supported")
1772 1f9430d6 Iustin Pop
1773 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1774 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1775 1f9430d6 Iustin Pop
                       selected=self.op.output_fields)
1776 1f9430d6 Iustin Pop
1777 6bf01bbb Guido Trotter
    # Lock all nodes, in shared mode
1778 a6ab004b Iustin Pop
    # Temporary removal of locks, should be reverted later
1779 a6ab004b Iustin Pop
    # TODO: reintroduce locks when they are lighter-weight
1780 6bf01bbb Guido Trotter
    self.needed_locks = {}
1781 a6ab004b Iustin Pop
    #self.share_locks[locking.LEVEL_NODE] = 1
1782 a6ab004b Iustin Pop
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1783 6bf01bbb Guido Trotter
1784 6bf01bbb Guido Trotter
  def CheckPrereq(self):
1785 6bf01bbb Guido Trotter
    """Check prerequisites.
1786 6bf01bbb Guido Trotter

1787 6bf01bbb Guido Trotter
    """
1788 6bf01bbb Guido Trotter
1789 1f9430d6 Iustin Pop
  @staticmethod
1790 1f9430d6 Iustin Pop
  def _DiagnoseByOS(node_list, rlist):
1791 1f9430d6 Iustin Pop
    """Remaps a per-node return list into an a per-os per-node dictionary
1792 1f9430d6 Iustin Pop

1793 e4376078 Iustin Pop
    @param node_list: a list with the names of all nodes
1794 e4376078 Iustin Pop
    @param rlist: a map with node names as keys and OS objects as values
1795 1f9430d6 Iustin Pop

1796 e4376078 Iustin Pop
    @rtype: dict
1797 5fcc718f Iustin Pop
    @return: a dictionary with osnames as keys and as value another map, with
1798 255dcebd Iustin Pop
        nodes as keys and tuples of (path, status, diagnose) as values, eg::
1799 e4376078 Iustin Pop

1800 255dcebd Iustin Pop
          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
1801 255dcebd Iustin Pop
                                     (/srv/..., False, "invalid api")],
1802 255dcebd Iustin Pop
                           "node2": [(/srv/..., True, "")]}
1803 e4376078 Iustin Pop
          }
1804 1f9430d6 Iustin Pop

1805 1f9430d6 Iustin Pop
    """
1806 1f9430d6 Iustin Pop
    all_os = {}
1807 a6ab004b Iustin Pop
    # we build here the list of nodes that didn't fail the RPC (at RPC
1808 a6ab004b Iustin Pop
    # level), so that nodes with a non-responding node daemon don't
1809 a6ab004b Iustin Pop
    # make all OSes invalid
1810 a6ab004b Iustin Pop
    good_nodes = [node_name for node_name in rlist
1811 4c4e4e1e Iustin Pop
                  if not rlist[node_name].fail_msg]
1812 83d92ad8 Iustin Pop
    for node_name, nr in rlist.items():
1813 4c4e4e1e Iustin Pop
      if nr.fail_msg or not nr.payload:
1814 1f9430d6 Iustin Pop
        continue
1815 255dcebd Iustin Pop
      for name, path, status, diagnose in nr.payload:
1816 255dcebd Iustin Pop
        if name not in all_os:
1817 1f9430d6 Iustin Pop
          # build a list of nodes for this os containing empty lists
1818 1f9430d6 Iustin Pop
          # for each node in node_list
1819 255dcebd Iustin Pop
          all_os[name] = {}
1820 a6ab004b Iustin Pop
          for nname in good_nodes:
1821 255dcebd Iustin Pop
            all_os[name][nname] = []
1822 255dcebd Iustin Pop
        all_os[name][node_name].append((path, status, diagnose))
1823 1f9430d6 Iustin Pop
    return all_os
1824 a8083063 Iustin Pop
1825 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1826 a8083063 Iustin Pop
    """Compute the list of OSes.
1827 a8083063 Iustin Pop

1828 a8083063 Iustin Pop
    """
1829 a6ab004b Iustin Pop
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
1830 94a02bb5 Iustin Pop
    node_data = self.rpc.call_os_diagnose(valid_nodes)
1831 94a02bb5 Iustin Pop
    pol = self._DiagnoseByOS(valid_nodes, node_data)
1832 1f9430d6 Iustin Pop
    output = []
1833 83d92ad8 Iustin Pop
    for os_name, os_data in pol.items():
1834 1f9430d6 Iustin Pop
      row = []
1835 1f9430d6 Iustin Pop
      for field in self.op.output_fields:
1836 1f9430d6 Iustin Pop
        if field == "name":
1837 1f9430d6 Iustin Pop
          val = os_name
1838 1f9430d6 Iustin Pop
        elif field == "valid":
1839 255dcebd Iustin Pop
          val = utils.all([osl and osl[0][1] for osl in os_data.values()])
1840 1f9430d6 Iustin Pop
        elif field == "node_status":
1841 255dcebd Iustin Pop
          # this is just a copy of the dict
1842 1f9430d6 Iustin Pop
          val = {}
1843 255dcebd Iustin Pop
          for node_name, nos_list in os_data.items():
1844 255dcebd Iustin Pop
            val[node_name] = nos_list
1845 1f9430d6 Iustin Pop
        else:
1846 1f9430d6 Iustin Pop
          raise errors.ParameterError(field)
1847 1f9430d6 Iustin Pop
        row.append(val)
1848 1f9430d6 Iustin Pop
      output.append(row)
1849 1f9430d6 Iustin Pop
1850 1f9430d6 Iustin Pop
    return output
1851 a8083063 Iustin Pop
1852 a8083063 Iustin Pop
1853 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1854 a8083063 Iustin Pop
  """Logical unit for removing a node.
1855 a8083063 Iustin Pop

1856 a8083063 Iustin Pop
  """
1857 a8083063 Iustin Pop
  HPATH = "node-remove"
1858 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1859 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1860 a8083063 Iustin Pop
1861 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1862 a8083063 Iustin Pop
    """Build hooks env.
1863 a8083063 Iustin Pop

1864 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1865 d08869ee Guido Trotter
    node would then be impossible to remove.
1866 a8083063 Iustin Pop

1867 a8083063 Iustin Pop
    """
1868 396e1b78 Michael Hanselmann
    env = {
1869 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1870 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1871 396e1b78 Michael Hanselmann
      }
1872 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1873 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1874 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1875 a8083063 Iustin Pop
1876 a8083063 Iustin Pop
  def CheckPrereq(self):
1877 a8083063 Iustin Pop
    """Check prerequisites.
1878 a8083063 Iustin Pop

1879 a8083063 Iustin Pop
    This checks:
1880 a8083063 Iustin Pop
     - the node exists in the configuration
1881 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1882 a8083063 Iustin Pop
     - it's not the master
1883 a8083063 Iustin Pop

1884 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1885 a8083063 Iustin Pop

1886 a8083063 Iustin Pop
    """
1887 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1888 a8083063 Iustin Pop
    if node is None:
1889 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1890 a8083063 Iustin Pop
1891 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1892 a8083063 Iustin Pop
1893 d6a02168 Michael Hanselmann
    masternode = self.cfg.GetMasterNode()
1894 a8083063 Iustin Pop
    if node.name == masternode:
1895 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1896 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1897 a8083063 Iustin Pop
1898 a8083063 Iustin Pop
    for instance_name in instance_list:
1899 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1900 6b12959c Iustin Pop
      if node.name in instance.all_nodes:
1901 6b12959c Iustin Pop
        raise errors.OpPrereqError("Instance %s is still running on the node,"
1902 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1903 a8083063 Iustin Pop
    self.op.node_name = node.name
1904 a8083063 Iustin Pop
    self.node = node
1905 a8083063 Iustin Pop
1906 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1907 a8083063 Iustin Pop
    """Removes the node from the cluster.
1908 a8083063 Iustin Pop

1909 a8083063 Iustin Pop
    """
1910 a8083063 Iustin Pop
    node = self.node
1911 9a4f63d1 Iustin Pop
    logging.info("Stopping the node daemon and removing configs from node %s",
1912 9a4f63d1 Iustin Pop
                 node.name)
1913 a8083063 Iustin Pop
1914 d8470559 Michael Hanselmann
    self.context.RemoveNode(node.name)
1915 a8083063 Iustin Pop
1916 0623d351 Iustin Pop
    result = self.rpc.call_node_leave_cluster(node.name)
1917 4c4e4e1e Iustin Pop
    msg = result.fail_msg
1918 0623d351 Iustin Pop
    if msg:
1919 0623d351 Iustin Pop
      self.LogWarning("Errors encountered on the remote node while leaving"
1920 0623d351 Iustin Pop
                      " the cluster: %s", msg)
1921 c8a0948f Michael Hanselmann
1922 eb1742d5 Guido Trotter
    # Promote nodes to master candidate as needed
1923 ec0292f1 Iustin Pop
    _AdjustCandidatePool(self)
1924 eb1742d5 Guido Trotter
1925 a8083063 Iustin Pop
1926 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1927 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1928 a8083063 Iustin Pop

1929 a8083063 Iustin Pop
  """
1930 bc8e4a1a Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
1931 35705d8f Guido Trotter
  REQ_BGL = False
1932 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet(
1933 31bf511f Iustin Pop
    "dtotal", "dfree",
1934 31bf511f Iustin Pop
    "mtotal", "mnode", "mfree",
1935 31bf511f Iustin Pop
    "bootid",
1936 0105bad3 Iustin Pop
    "ctotal", "cnodes", "csockets",
1937 31bf511f Iustin Pop
    )
1938 31bf511f Iustin Pop
1939 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(
1940 31bf511f Iustin Pop
    "name", "pinst_cnt", "sinst_cnt",
1941 31bf511f Iustin Pop
    "pinst_list", "sinst_list",
1942 31bf511f Iustin Pop
    "pip", "sip", "tags",
1943 31bf511f Iustin Pop
    "serial_no",
1944 0e67cdbe Iustin Pop
    "master_candidate",
1945 0e67cdbe Iustin Pop
    "master",
1946 9ddb5e45 Iustin Pop
    "offline",
1947 0b2454b9 Iustin Pop
    "drained",
1948 31bf511f Iustin Pop
    )
1949 a8083063 Iustin Pop
1950 35705d8f Guido Trotter
  def ExpandNames(self):
1951 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
1952 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
1953 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1954 a8083063 Iustin Pop
1955 35705d8f Guido Trotter
    self.needed_locks = {}
1956 35705d8f Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
1957 c8d8b4c8 Iustin Pop
1958 c8d8b4c8 Iustin Pop
    if self.op.names:
1959 c8d8b4c8 Iustin Pop
      self.wanted = _GetWantedNodes(self, self.op.names)
1960 35705d8f Guido Trotter
    else:
1961 c8d8b4c8 Iustin Pop
      self.wanted = locking.ALL_SET
1962 c8d8b4c8 Iustin Pop
1963 bc8e4a1a Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
1964 bc8e4a1a Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
1965 c8d8b4c8 Iustin Pop
    if self.do_locking:
1966 c8d8b4c8 Iustin Pop
      # if we don't request only static fields, we need to lock the nodes
1967 c8d8b4c8 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = self.wanted
1968 c8d8b4c8 Iustin Pop
1969 35705d8f Guido Trotter
1970 35705d8f Guido Trotter
  def CheckPrereq(self):
1971 35705d8f Guido Trotter
    """Check prerequisites.
1972 35705d8f Guido Trotter

1973 35705d8f Guido Trotter
    """
1974 c8d8b4c8 Iustin Pop
    # The validation of the node list is done in _GetWantedNodes,
1975 c8d8b4c8 Iustin Pop
    # if non-empty; if empty, there is no validation to do
1976 c8d8b4c8 Iustin Pop
    pass
1977 a8083063 Iustin Pop
1978 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1979 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1980 a8083063 Iustin Pop

1981 a8083063 Iustin Pop
    """
1982 c8d8b4c8 Iustin Pop
    all_info = self.cfg.GetAllNodesInfo()
1983 c8d8b4c8 Iustin Pop
    if self.do_locking:
1984 c8d8b4c8 Iustin Pop
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
1985 3fa93523 Guido Trotter
    elif self.wanted != locking.ALL_SET:
1986 3fa93523 Guido Trotter
      nodenames = self.wanted
1987 3fa93523 Guido Trotter
      missing = set(nodenames).difference(all_info.keys())
1988 3fa93523 Guido Trotter
      if missing:
1989 7b3a8fb5 Iustin Pop
        raise errors.OpExecError(
1990 3fa93523 Guido Trotter
          "Some nodes were removed before retrieving their data: %s" % missing)
1991 c8d8b4c8 Iustin Pop
    else:
1992 c8d8b4c8 Iustin Pop
      nodenames = all_info.keys()
1993 c1f1cbb2 Iustin Pop
1994 c1f1cbb2 Iustin Pop
    nodenames = utils.NiceSort(nodenames)
1995 c8d8b4c8 Iustin Pop
    nodelist = [all_info[name] for name in nodenames]
1996 a8083063 Iustin Pop
1997 a8083063 Iustin Pop
    # begin data gathering
1998 a8083063 Iustin Pop
1999 bc8e4a1a Iustin Pop
    if self.do_node_query:
2000 a8083063 Iustin Pop
      live_data = {}
2001 72737a7f Iustin Pop
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
2002 72737a7f Iustin Pop
                                          self.cfg.GetHypervisorType())
2003 a8083063 Iustin Pop
      for name in nodenames:
2004 781de953 Iustin Pop
        nodeinfo = node_data[name]
2005 4c4e4e1e Iustin Pop
        if not nodeinfo.fail_msg and nodeinfo.payload:
2006 070e998b Iustin Pop
          nodeinfo = nodeinfo.payload
2007 d599d686 Iustin Pop
          fn = utils.TryConvert
2008 a8083063 Iustin Pop
          live_data[name] = {
2009 d599d686 Iustin Pop
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
2010 d599d686 Iustin Pop
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
2011 d599d686 Iustin Pop
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
2012 d599d686 Iustin Pop
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
2013 d599d686 Iustin Pop
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
2014 d599d686 Iustin Pop
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
2015 d599d686 Iustin Pop
            "bootid": nodeinfo.get('bootid', None),
2016 0105bad3 Iustin Pop
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
2017 0105bad3 Iustin Pop
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
2018 a8083063 Iustin Pop
            }
2019 a8083063 Iustin Pop
        else:
2020 a8083063 Iustin Pop
          live_data[name] = {}
2021 a8083063 Iustin Pop
    else:
2022 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
2023 a8083063 Iustin Pop
2024 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
2025 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
2026 a8083063 Iustin Pop
2027 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
2028 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
2029 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
2030 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
2031 a8083063 Iustin Pop
2032 ec223efb Iustin Pop
      for instance_name in instancelist:
2033 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
2034 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
2035 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
2036 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
2037 ec223efb Iustin Pop
          if secnode in node_to_secondary:
2038 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
2039 a8083063 Iustin Pop
2040 0e67cdbe Iustin Pop
    master_node = self.cfg.GetMasterNode()
2041 0e67cdbe Iustin Pop
2042 a8083063 Iustin Pop
    # end data gathering
2043 a8083063 Iustin Pop
2044 a8083063 Iustin Pop
    output = []
2045 a8083063 Iustin Pop
    for node in nodelist:
2046 a8083063 Iustin Pop
      node_output = []
2047 a8083063 Iustin Pop
      for field in self.op.output_fields:
2048 a8083063 Iustin Pop
        if field == "name":
2049 a8083063 Iustin Pop
          val = node.name
2050 ec223efb Iustin Pop
        elif field == "pinst_list":
2051 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
2052 ec223efb Iustin Pop
        elif field == "sinst_list":
2053 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
2054 ec223efb Iustin Pop
        elif field == "pinst_cnt":
2055 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
2056 ec223efb Iustin Pop
        elif field == "sinst_cnt":
2057 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
2058 a8083063 Iustin Pop
        elif field == "pip":
2059 a8083063 Iustin Pop
          val = node.primary_ip
2060 a8083063 Iustin Pop
        elif field == "sip":
2061 a8083063 Iustin Pop
          val = node.secondary_ip
2062 130a6a6f Iustin Pop
        elif field == "tags":
2063 130a6a6f Iustin Pop
          val = list(node.GetTags())
2064 38d7239a Iustin Pop
        elif field == "serial_no":
2065 38d7239a Iustin Pop
          val = node.serial_no
2066 0e67cdbe Iustin Pop
        elif field == "master_candidate":
2067 0e67cdbe Iustin Pop
          val = node.master_candidate
2068 0e67cdbe Iustin Pop
        elif field == "master":
2069 0e67cdbe Iustin Pop
          val = node.name == master_node
2070 9ddb5e45 Iustin Pop
        elif field == "offline":
2071 9ddb5e45 Iustin Pop
          val = node.offline
2072 0b2454b9 Iustin Pop
        elif field == "drained":
2073 0b2454b9 Iustin Pop
          val = node.drained
2074 31bf511f Iustin Pop
        elif self._FIELDS_DYNAMIC.Matches(field):
2075 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
2076 a8083063 Iustin Pop
        else:
2077 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2078 a8083063 Iustin Pop
        node_output.append(val)
2079 a8083063 Iustin Pop
      output.append(node_output)
2080 a8083063 Iustin Pop
2081 a8083063 Iustin Pop
    return output
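    # Illustrative example with hypothetical data: for
    # output_fields=["name", "pinst_cnt", "offline"] the returned value
    # could look like
    #   [["node1.example.com", 2, False],
    #    ["node2.example.com", 0, True]]
    # i.e. one row per node, one entry per requested field, in order.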
2082 a8083063 Iustin Pop
2083 a8083063 Iustin Pop
2084 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
2085 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
2086 dcb93971 Michael Hanselmann

2087 dcb93971 Michael Hanselmann
  """
2088 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
2089 21a15682 Guido Trotter
  REQ_BGL = False
2090 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
2091 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("node")
2092 21a15682 Guido Trotter
2093 21a15682 Guido Trotter
  def ExpandNames(self):
2094 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2095 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2096 21a15682 Guido Trotter
                       selected=self.op.output_fields)
2097 21a15682 Guido Trotter
2098 21a15682 Guido Trotter
    self.needed_locks = {}
2099 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
2100 21a15682 Guido Trotter
    if not self.op.nodes:
2101 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2102 21a15682 Guido Trotter
    else:
2103 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
2104 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
2105 dcb93971 Michael Hanselmann
2106 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
2107 dcb93971 Michael Hanselmann
    """Check prerequisites.
2108 dcb93971 Michael Hanselmann

2109 dcb93971 Michael Hanselmann
    This computes the list of nodes to query from the acquired locks.
2110 dcb93971 Michael Hanselmann

2111 dcb93971 Michael Hanselmann
    """
2112 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
2113 dcb93971 Michael Hanselmann
2114 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
2115 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
2116 dcb93971 Michael Hanselmann

2117 dcb93971 Michael Hanselmann
    """
2118 a7ba5e53 Iustin Pop
    nodenames = self.nodes
2119 72737a7f Iustin Pop
    volumes = self.rpc.call_node_volumes(nodenames)
2120 dcb93971 Michael Hanselmann
2121 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
2122 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
2123 dcb93971 Michael Hanselmann
2124 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
2125 dcb93971 Michael Hanselmann
2126 dcb93971 Michael Hanselmann
    output = []
2127 dcb93971 Michael Hanselmann
    for node in nodenames:
2128 10bfe6cb Iustin Pop
      nresult = volumes[node]
2129 10bfe6cb Iustin Pop
      if nresult.offline:
2130 10bfe6cb Iustin Pop
        continue
2131 4c4e4e1e Iustin Pop
      msg = nresult.fail_msg
2132 10bfe6cb Iustin Pop
      if msg:
2133 10bfe6cb Iustin Pop
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
2134 37d19eb2 Michael Hanselmann
        continue
2135 37d19eb2 Michael Hanselmann
2136 10bfe6cb Iustin Pop
      node_vols = nresult.payload[:]
2137 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
2138 dcb93971 Michael Hanselmann
2139 dcb93971 Michael Hanselmann
      for vol in node_vols:
2140 dcb93971 Michael Hanselmann
        node_output = []
2141 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
2142 dcb93971 Michael Hanselmann
          if field == "node":
2143 dcb93971 Michael Hanselmann
            val = node
2144 dcb93971 Michael Hanselmann
          elif field == "phys":
2145 dcb93971 Michael Hanselmann
            val = vol['dev']
2146 dcb93971 Michael Hanselmann
          elif field == "vg":
2147 dcb93971 Michael Hanselmann
            val = vol['vg']
2148 dcb93971 Michael Hanselmann
          elif field == "name":
2149 dcb93971 Michael Hanselmann
            val = vol['name']
2150 dcb93971 Michael Hanselmann
          elif field == "size":
2151 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
2152 dcb93971 Michael Hanselmann
          elif field == "instance":
2153 dcb93971 Michael Hanselmann
            for inst in ilist:
2154 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
2155 dcb93971 Michael Hanselmann
                continue
2156 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
2157 dcb93971 Michael Hanselmann
                val = inst.name
2158 dcb93971 Michael Hanselmann
                break
2159 dcb93971 Michael Hanselmann
            else:
2160 dcb93971 Michael Hanselmann
              val = '-'
2161 dcb93971 Michael Hanselmann
          else:
2162 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
2163 dcb93971 Michael Hanselmann
          node_output.append(str(val))
2164 dcb93971 Michael Hanselmann
2165 dcb93971 Michael Hanselmann
        output.append(node_output)
2166 dcb93971 Michael Hanselmann
2167 dcb93971 Michael Hanselmann
    return output
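    # Illustrative example with hypothetical data: for
    # output_fields=["node", "phys", "size", "instance"] a single row could
    # look like
    #   ["node1.example.com", "/dev/sda3", "10240", "instance1"]
    # every value is stringified via str() above, and volumes that belong to
    # no instance get "-" in the instance column.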
2168 dcb93971 Michael Hanselmann
2169 dcb93971 Michael Hanselmann
2170 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
2171 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
2172 a8083063 Iustin Pop

2173 a8083063 Iustin Pop
  """
2174 a8083063 Iustin Pop
  HPATH = "node-add"
2175 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2176 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
2177 a8083063 Iustin Pop
2178 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2179 a8083063 Iustin Pop
    """Build hooks env.
2180 a8083063 Iustin Pop

2181 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
2182 a8083063 Iustin Pop

2183 a8083063 Iustin Pop
    """
2184 a8083063 Iustin Pop
    env = {
2185 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
2186 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
2187 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
2188 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
2189 a8083063 Iustin Pop
      }
2190 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
2191 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
2192 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
2193 a8083063 Iustin Pop
2194 a8083063 Iustin Pop
  def CheckPrereq(self):
2195 a8083063 Iustin Pop
    """Check prerequisites.
2196 a8083063 Iustin Pop

2197 a8083063 Iustin Pop
    This checks:
2198 a8083063 Iustin Pop
     - the new node is not already in the config
2199 a8083063 Iustin Pop
     - it is resolvable
2200 a8083063 Iustin Pop
     - its parameters (single/dual homed) match the cluster
2201 a8083063 Iustin Pop

2202 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
2203 a8083063 Iustin Pop

2204 a8083063 Iustin Pop
    """
2205 a8083063 Iustin Pop
    node_name = self.op.node_name
2206 a8083063 Iustin Pop
    cfg = self.cfg
2207 a8083063 Iustin Pop
2208 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
2209 a8083063 Iustin Pop
2210 bcf043c9 Iustin Pop
    node = dns_data.name
2211 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
2212 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
2213 a8083063 Iustin Pop
    if secondary_ip is None:
2214 a8083063 Iustin Pop
      secondary_ip = primary_ip
2215 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
2216 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
2217 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
2218 e7c6e02b Michael Hanselmann
2219 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
2220 e7c6e02b Michael Hanselmann
    if not self.op.readd and node in node_list:
2221 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is already in the configuration" %
2222 e7c6e02b Michael Hanselmann
                                 node)
2223 e7c6e02b Michael Hanselmann
    elif self.op.readd and node not in node_list:
2224 e7c6e02b Michael Hanselmann
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)
2225 a8083063 Iustin Pop
2226 a8083063 Iustin Pop
    for existing_node_name in node_list:
2227 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
2228 e7c6e02b Michael Hanselmann
2229 e7c6e02b Michael Hanselmann
      if self.op.readd and node == existing_node_name:
2230 e7c6e02b Michael Hanselmann
        if (existing_node.primary_ip != primary_ip or
2231 e7c6e02b Michael Hanselmann
            existing_node.secondary_ip != secondary_ip):
2232 e7c6e02b Michael Hanselmann
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
2233 e7c6e02b Michael Hanselmann
                                     " address configuration as before")
2234 e7c6e02b Michael Hanselmann
        continue
2235 e7c6e02b Michael Hanselmann
2236 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
2237 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
2238 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
2239 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
2240 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
2241 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
2242 a8083063 Iustin Pop
2243 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
2244 a8083063 Iustin Pop
    # same as for the master
2245 d6a02168 Michael Hanselmann
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
2246 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
2247 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
2248 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
2249 a8083063 Iustin Pop
      if master_singlehomed:
2250 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
2251 3ecf6786 Iustin Pop
                                   " new node has one")
2252 a8083063 Iustin Pop
      else:
2253 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
2254 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
2255 a8083063 Iustin Pop
2256 a8083063 Iustin Pop
    # checks reachability
2257 b15d625f Iustin Pop
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
2258 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
2259 a8083063 Iustin Pop
2260 a8083063 Iustin Pop
    if not newbie_singlehomed:
2261 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
2262 b15d625f Iustin Pop
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
2263 b15d625f Iustin Pop
                           source=myself.secondary_ip):
2264 f4bc1f2c Michael Hanselmann
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
2265 f4bc1f2c Michael Hanselmann
                                   " based ping to noded port")
2266 a8083063 Iustin Pop
2267 0fff97e9 Guido Trotter
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2268 ec0292f1 Iustin Pop
    mc_now, _ = self.cfg.GetMasterCandidateStats()
2269 ec0292f1 Iustin Pop
    master_candidate = mc_now < cp_size
2270 0fff97e9 Guido Trotter
2271 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
2272 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
2273 0fff97e9 Guido Trotter
                                 secondary_ip=secondary_ip,
2274 fc0fe88c Iustin Pop
                                 master_candidate=master_candidate,
2275 af64c0ea Iustin Pop
                                 offline=False, drained=False)
2276 a8083063 Iustin Pop
2277 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2278 a8083063 Iustin Pop
    """Adds the new node to the cluster.
2279 a8083063 Iustin Pop

2280 a8083063 Iustin Pop
    """
2281 a8083063 Iustin Pop
    new_node = self.new_node
2282 a8083063 Iustin Pop
    node = new_node.name
2283 a8083063 Iustin Pop
2284 a8083063 Iustin Pop
    # check connectivity
2285 72737a7f Iustin Pop
    result = self.rpc.call_version([node])[node]
2286 4c4e4e1e Iustin Pop
    result.Raise("Can't get version information from node %s" % node)
2287 90b54c26 Iustin Pop
    if constants.PROTOCOL_VERSION == result.payload:
2288 90b54c26 Iustin Pop
      logging.info("Communication to node %s fine, sw version %s match",
2289 90b54c26 Iustin Pop
                   node, result.payload)
2290 a8083063 Iustin Pop
    else:
2291 90b54c26 Iustin Pop
      raise errors.OpExecError("Version mismatch master version %s,"
2292 90b54c26 Iustin Pop
                               " node version %s" %
2293 90b54c26 Iustin Pop
                               (constants.PROTOCOL_VERSION, result.payload))
2294 a8083063 Iustin Pop
2295 a8083063 Iustin Pop
    # setup ssh on node
2296 9a4f63d1 Iustin Pop
    logging.info("Copy ssh key to node %s", node)
2297 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
2298 a8083063 Iustin Pop
    keyarray = []
2299 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
2300 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
2301 70d9e3d8 Iustin Pop
                priv_key, pub_key]
2302 a8083063 Iustin Pop
2303 a8083063 Iustin Pop
    for i in keyfiles:
2304 a8083063 Iustin Pop
      f = open(i, 'r')
2305 a8083063 Iustin Pop
      try:
2306 a8083063 Iustin Pop
        keyarray.append(f.read())
2307 a8083063 Iustin Pop
      finally:
2308 a8083063 Iustin Pop
        f.close()
2309 a8083063 Iustin Pop
2310 72737a7f Iustin Pop
    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
2311 72737a7f Iustin Pop
                                    keyarray[2],
2312 72737a7f Iustin Pop
                                    keyarray[3], keyarray[4], keyarray[5])
2313 4c4e4e1e Iustin Pop
    result.Raise("Cannot transfer ssh keys to the new node")
2314 a8083063 Iustin Pop
2315 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
2316 b86a6bcd Guido Trotter
    if self.cfg.GetClusterInfo().modify_etc_hosts:
2317 b86a6bcd Guido Trotter
      utils.AddHostToEtcHosts(new_node.name)
2318 c8a0948f Michael Hanselmann
2319 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
2320 781de953 Iustin Pop
      result = self.rpc.call_node_has_ip_address(new_node.name,
2321 781de953 Iustin Pop
                                                 new_node.secondary_ip)
2322 4c4e4e1e Iustin Pop
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
2323 4c4e4e1e Iustin Pop
                   prereq=True)
2324 c2fc8250 Iustin Pop
      if not result.payload:
2325 f4bc1f2c Michael Hanselmann
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
2326 f4bc1f2c Michael Hanselmann
                                 " you gave (%s). Please fix and re-run this"
2327 f4bc1f2c Michael Hanselmann
                                 " command." % new_node.secondary_ip)
2328 a8083063 Iustin Pop
2329 d6a02168 Michael Hanselmann
    node_verify_list = [self.cfg.GetMasterNode()]
2330 5c0527ed Guido Trotter
    node_verify_param = {
2331 5c0527ed Guido Trotter
      'nodelist': [node],
2332 5c0527ed Guido Trotter
      # TODO: do a node-net-test as well?
2333 5c0527ed Guido Trotter
    }
2334 5c0527ed Guido Trotter
2335 72737a7f Iustin Pop
    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
2336 72737a7f Iustin Pop
                                       self.cfg.GetClusterName())
2337 5c0527ed Guido Trotter
    for verifier in node_verify_list:
2338 4c4e4e1e Iustin Pop
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
2339 6f68a739 Iustin Pop
      nl_payload = result[verifier].payload['nodelist']
2340 6f68a739 Iustin Pop
      if nl_payload:
2341 6f68a739 Iustin Pop
        for failed in nl_payload:
2342 5c0527ed Guido Trotter
          feedback_fn("ssh/hostname verification failed %s -> %s" %
2343 6f68a739 Iustin Pop
                      (verifier, nl_payload[failed]))
2344 5c0527ed Guido Trotter
        raise errors.OpExecError("ssh/hostname verification failed.")
2345 ff98055b Iustin Pop
2346 d8470559 Michael Hanselmann
    if self.op.readd:
2347 28eddce5 Guido Trotter
      _RedistributeAncillaryFiles(self)
2348 d8470559 Michael Hanselmann
      self.context.ReaddNode(new_node)
2349 d8470559 Michael Hanselmann
    else:
2350 035566e3 Iustin Pop
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
2351 d8470559 Michael Hanselmann
      self.context.AddNode(new_node)
2352 a8083063 Iustin Pop
2353 a8083063 Iustin Pop
2354 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
2355 b31c8676 Iustin Pop
  """Modifies the parameters of a node.
2356 b31c8676 Iustin Pop

2357 b31c8676 Iustin Pop
  """
2358 b31c8676 Iustin Pop
  HPATH = "node-modify"
2359 b31c8676 Iustin Pop
  HTYPE = constants.HTYPE_NODE
2360 b31c8676 Iustin Pop
  _OP_REQP = ["node_name"]
2361 b31c8676 Iustin Pop
  REQ_BGL = False
2362 b31c8676 Iustin Pop
2363 b31c8676 Iustin Pop
  def CheckArguments(self):
2364 b31c8676 Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2365 b31c8676 Iustin Pop
    if node_name is None:
2366 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2367 b31c8676 Iustin Pop
    self.op.node_name = node_name
2368 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'master_candidate')
2369 3a5ba66a Iustin Pop
    _CheckBooleanOpField(self.op, 'offline')
2370 c9d443ea Iustin Pop
    _CheckBooleanOpField(self.op, 'drained')
2371 c9d443ea Iustin Pop
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
2372 c9d443ea Iustin Pop
    if all_mods.count(None) == 3:
2373 b31c8676 Iustin Pop
      raise errors.OpPrereqError("Please pass at least one modification")
2374 c9d443ea Iustin Pop
    if all_mods.count(True) > 1:
2375 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Can't set the node into more than one"
2376 c9d443ea Iustin Pop
                                 " state at the same time")
2377 b31c8676 Iustin Pop
2378 b31c8676 Iustin Pop
  def ExpandNames(self):
2379 b31c8676 Iustin Pop
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
2380 b31c8676 Iustin Pop
2381 b31c8676 Iustin Pop
  def BuildHooksEnv(self):
2382 b31c8676 Iustin Pop
    """Build hooks env.
2383 b31c8676 Iustin Pop

2384 b31c8676 Iustin Pop
    This runs on the master node.
2385 b31c8676 Iustin Pop

2386 b31c8676 Iustin Pop
    """
2387 b31c8676 Iustin Pop
    env = {
2388 b31c8676 Iustin Pop
      "OP_TARGET": self.op.node_name,
2389 b31c8676 Iustin Pop
      "MASTER_CANDIDATE": str(self.op.master_candidate),
2390 3a5ba66a Iustin Pop
      "OFFLINE": str(self.op.offline),
2391 c9d443ea Iustin Pop
      "DRAINED": str(self.op.drained),
2392 b31c8676 Iustin Pop
      }
2393 b31c8676 Iustin Pop
    nl = [self.cfg.GetMasterNode(),
2394 b31c8676 Iustin Pop
          self.op.node_name]
2395 b31c8676 Iustin Pop
    return env, nl, nl
2396 b31c8676 Iustin Pop
2397 b31c8676 Iustin Pop
  def CheckPrereq(self):
2398 b31c8676 Iustin Pop
    """Check prerequisites.
2399 b31c8676 Iustin Pop

2400 b31c8676 Iustin Pop
    This checks the consistency of the requested node state changes.
2401 b31c8676 Iustin Pop

2402 b31c8676 Iustin Pop
    """
2403 3a5ba66a Iustin Pop
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)
2404 b31c8676 Iustin Pop
2405 c9d443ea Iustin Pop
    if ((self.op.master_candidate == False or self.op.offline == True or
2406 c9d443ea Iustin Pop
         self.op.drained == True) and node.master_candidate):
2407 3a5ba66a Iustin Pop
      # we will demote the node from master_candidate
2408 3a26773f Iustin Pop
      if self.op.node_name == self.cfg.GetMasterNode():
2409 3a26773f Iustin Pop
        raise errors.OpPrereqError("The master node has to be a"
2410 c9d443ea Iustin Pop
                                   " master candidate, online and not drained")
2411 3e83dd48 Iustin Pop
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
2412 3a5ba66a Iustin Pop
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
2413 3e83dd48 Iustin Pop
      if num_candidates <= cp_size:
2414 3e83dd48 Iustin Pop
        msg = ("Not enough master candidates (desired"
2415 3e83dd48 Iustin Pop
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
2416 3a5ba66a Iustin Pop
        if self.op.force:
2417 3e83dd48 Iustin Pop
          self.LogWarning(msg)
2418 3e83dd48 Iustin Pop
        else:
2419 3e83dd48 Iustin Pop
          raise errors.OpPrereqError(msg)
2420 3e83dd48 Iustin Pop
2421 c9d443ea Iustin Pop
    if (self.op.master_candidate == True and
2422 c9d443ea Iustin Pop
        ((node.offline and not self.op.offline == False) or
2423 c9d443ea Iustin Pop
         (node.drained and not self.op.drained == False))):
2424 c9d443ea Iustin Pop
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
2425 949bdabe Iustin Pop
                                 " to master_candidate" % node.name)
2426 3a5ba66a Iustin Pop
2427 b31c8676 Iustin Pop
    return
2428 b31c8676 Iustin Pop
2429 b31c8676 Iustin Pop
  def Exec(self, feedback_fn):
2430 b31c8676 Iustin Pop
    """Modifies a node.
2431 b31c8676 Iustin Pop

2432 b31c8676 Iustin Pop
    """
2433 3a5ba66a Iustin Pop
    node = self.node
2434 b31c8676 Iustin Pop
2435 b31c8676 Iustin Pop
    result = []
2436 c9d443ea Iustin Pop
    changed_mc = False
2437 b31c8676 Iustin Pop
2438 3a5ba66a Iustin Pop
    if self.op.offline is not None:
2439 3a5ba66a Iustin Pop
      node.offline = self.op.offline
2440 3a5ba66a Iustin Pop
      result.append(("offline", str(self.op.offline)))
2441 c9d443ea Iustin Pop
      if self.op.offline == True:
2442 c9d443ea Iustin Pop
        if node.master_candidate:
2443 c9d443ea Iustin Pop
          node.master_candidate = False
2444 c9d443ea Iustin Pop
          changed_mc = True
2445 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to offline"))
2446 c9d443ea Iustin Pop
        if node.drained:
2447 c9d443ea Iustin Pop
          node.drained = False
2448 c9d443ea Iustin Pop
          result.append(("drained", "clear drained status due to offline"))
2449 3a5ba66a Iustin Pop
2450 b31c8676 Iustin Pop
    if self.op.master_candidate is not None:
2451 b31c8676 Iustin Pop
      node.master_candidate = self.op.master_candidate
2452 c9d443ea Iustin Pop
      changed_mc = True
2453 b31c8676 Iustin Pop
      result.append(("master_candidate", str(self.op.master_candidate)))
2454 56aa9fd5 Iustin Pop
      if self.op.master_candidate == False:
2455 56aa9fd5 Iustin Pop
        rrc = self.rpc.call_node_demote_from_mc(node.name)
2456 4c4e4e1e Iustin Pop
        msg = rrc.fail_msg
2457 0959c824 Iustin Pop
        if msg:
2458 0959c824 Iustin Pop
          self.LogWarning("Node failed to demote itself: %s" % msg)
2459 b31c8676 Iustin Pop
2460 c9d443ea Iustin Pop
    if self.op.drained is not None:
2461 c9d443ea Iustin Pop
      node.drained = self.op.drained
2462 82e12743 Iustin Pop
      result.append(("drained", str(self.op.drained)))
2463 c9d443ea Iustin Pop
      if self.op.drained == True:
2464 c9d443ea Iustin Pop
        if node.master_candidate:
2465 c9d443ea Iustin Pop
          node.master_candidate = False
2466 c9d443ea Iustin Pop
          changed_mc = True
2467 c9d443ea Iustin Pop
          result.append(("master_candidate", "auto-demotion due to drain"))
2468 c9d443ea Iustin Pop
        if node.offline:
2469 c9d443ea Iustin Pop
          node.offline = False
2470 c9d443ea Iustin Pop
          result.append(("offline", "clear offline status due to drain"))
2471 c9d443ea Iustin Pop
2472 b31c8676 Iustin Pop
    # this will trigger configuration file update, if needed
2473 b31c8676 Iustin Pop
    self.cfg.Update(node)
2474 b31c8676 Iustin Pop
    # this will trigger job queue propagation or cleanup
2475 c9d443ea Iustin Pop
    if changed_mc:
2476 3a26773f Iustin Pop
      self.context.ReaddNode(node)
2477 b31c8676 Iustin Pop
2478 b31c8676 Iustin Pop
    return result
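    # Illustrative example (hypothetical): offlining a node that is currently
    # a master candidate would return something like
    #   [("offline", "True"),
    #    ("master_candidate", "auto-demotion due to offline")]
    # i.e. a list of (parameter, new value or reason) pairs.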
2479 b31c8676 Iustin Pop
2480 b31c8676 Iustin Pop
2481 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
2482 f5118ade Iustin Pop
  """Powercycles a node.
2483 f5118ade Iustin Pop

2484 f5118ade Iustin Pop
  """
2485 f5118ade Iustin Pop
  _OP_REQP = ["node_name", "force"]
2486 f5118ade Iustin Pop
  REQ_BGL = False
2487 f5118ade Iustin Pop
2488 f5118ade Iustin Pop
  def CheckArguments(self):
2489 f5118ade Iustin Pop
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
2490 f5118ade Iustin Pop
    if node_name is None:
2491 f5118ade Iustin Pop
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
2492 f5118ade Iustin Pop
    self.op.node_name = node_name
2493 f5118ade Iustin Pop
    if node_name == self.cfg.GetMasterNode() and not self.op.force:
2494 f5118ade Iustin Pop
      raise errors.OpPrereqError("The node is the master and the force"
2495 f5118ade Iustin Pop
                                 " parameter was not set")
2496 f5118ade Iustin Pop
2497 f5118ade Iustin Pop
  def ExpandNames(self):
2498 f5118ade Iustin Pop
    """Locking for PowercycleNode.
2499 f5118ade Iustin Pop

2500 f5118ade Iustin Pop
    This is a last-resource option and shouldn't block on other
2501 f5118ade Iustin Pop
    jobs. Therefore, we grab no locks.
2502 f5118ade Iustin Pop

2503 f5118ade Iustin Pop
    """
2504 f5118ade Iustin Pop
    self.needed_locks = {}
2505 f5118ade Iustin Pop
2506 f5118ade Iustin Pop
  def CheckPrereq(self):
2507 f5118ade Iustin Pop
    """Check prerequisites.
2508 f5118ade Iustin Pop

2509 f5118ade Iustin Pop
    This LU has no prereqs.
2510 f5118ade Iustin Pop

2511 f5118ade Iustin Pop
    """
2512 f5118ade Iustin Pop
    pass
2513 f5118ade Iustin Pop
2514 f5118ade Iustin Pop
  def Exec(self, feedback_fn):
2515 f5118ade Iustin Pop
    """Reboots a node.
2516 f5118ade Iustin Pop

2517 f5118ade Iustin Pop
    """
2518 f5118ade Iustin Pop
    result = self.rpc.call_node_powercycle(self.op.node_name,
2519 f5118ade Iustin Pop
                                           self.cfg.GetHypervisorType())
2520 4c4e4e1e Iustin Pop
    result.Raise("Failed to schedule the reboot")
2521 f5118ade Iustin Pop
    return result.payload
2522 f5118ade Iustin Pop
2523 f5118ade Iustin Pop
2524 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
2525 a8083063 Iustin Pop
  """Query cluster configuration.
2526 a8083063 Iustin Pop

2527 a8083063 Iustin Pop
  """
2528 a8083063 Iustin Pop
  _OP_REQP = []
2529 642339cf Guido Trotter
  REQ_BGL = False
2530 642339cf Guido Trotter
2531 642339cf Guido Trotter
  def ExpandNames(self):
2532 642339cf Guido Trotter
    self.needed_locks = {}
2533 a8083063 Iustin Pop
2534 a8083063 Iustin Pop
  def CheckPrereq(self):
2535 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
2536 a8083063 Iustin Pop

2537 a8083063 Iustin Pop
    """
2538 a8083063 Iustin Pop
    pass
2539 a8083063 Iustin Pop
2540 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2541 a8083063 Iustin Pop
    """Return cluster config.
2542 a8083063 Iustin Pop

2543 a8083063 Iustin Pop
    """
2544 469f88e1 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
2545 a8083063 Iustin Pop
    result = {
2546 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
2547 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
2548 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
2549 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
2550 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
2551 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
2552 469f88e1 Iustin Pop
      "name": cluster.cluster_name,
2553 469f88e1 Iustin Pop
      "master": cluster.master_node,
2554 02691904 Alexander Schreiber
      "default_hypervisor": cluster.default_hypervisor,
2555 469f88e1 Iustin Pop
      "enabled_hypervisors": cluster.enabled_hypervisors,
2556 7a735d6a Guido Trotter
      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
2557 7a735d6a Guido Trotter
                        for hypervisor in cluster.enabled_hypervisors]),
2558 469f88e1 Iustin Pop
      "beparams": cluster.beparams,
2559 1094acda Guido Trotter
      "nicparams": cluster.nicparams,
2560 4b7735f9 Iustin Pop
      "candidate_pool_size": cluster.candidate_pool_size,
2561 7a56b411 Guido Trotter
      "master_netdev": cluster.master_netdev,
2562 7a56b411 Guido Trotter
      "volume_group_name": cluster.volume_group_name,
2563 7a56b411 Guido Trotter
      "file_storage_dir": cluster.file_storage_dir,
2564 a8083063 Iustin Pop
      }
2565 a8083063 Iustin Pop
2566 a8083063 Iustin Pop
    return result
2567 a8083063 Iustin Pop
2568 a8083063 Iustin Pop
2569 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
2570 ae5849b5 Michael Hanselmann
  """Return configuration values.
2571 a8083063 Iustin Pop

2572 a8083063 Iustin Pop
  """
2573 a8083063 Iustin Pop
  _OP_REQP = []
2574 642339cf Guido Trotter
  REQ_BGL = False
2575 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet()
2576 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")
2577 642339cf Guido Trotter
2578 642339cf Guido Trotter
  def ExpandNames(self):
2579 642339cf Guido Trotter
    self.needed_locks = {}
2580 a8083063 Iustin Pop
2581 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
2582 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
2583 ae5849b5 Michael Hanselmann
                       selected=self.op.output_fields)
2584 ae5849b5 Michael Hanselmann
2585 a8083063 Iustin Pop
  def CheckPrereq(self):
2586 a8083063 Iustin Pop
    """No prerequisites.
2587 a8083063 Iustin Pop

2588 a8083063 Iustin Pop
    """
2589 a8083063 Iustin Pop
    pass
2590 a8083063 Iustin Pop
2591 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2592 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
2593 a8083063 Iustin Pop

2594 a8083063 Iustin Pop
    """
2595 ae5849b5 Michael Hanselmann
    values = []
2596 ae5849b5 Michael Hanselmann
    for field in self.op.output_fields:
2597 ae5849b5 Michael Hanselmann
      if field == "cluster_name":
2598 3ccafd0e Iustin Pop
        entry = self.cfg.GetClusterName()
2599 ae5849b5 Michael Hanselmann
      elif field == "master_node":
2600 3ccafd0e Iustin Pop
        entry = self.cfg.GetMasterNode()
2601 3ccafd0e Iustin Pop
      elif field == "drain_flag":
2602 3ccafd0e Iustin Pop
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
2603 ae5849b5 Michael Hanselmann
      else:
2604 ae5849b5 Michael Hanselmann
        raise errors.ParameterError(field)
2605 3ccafd0e Iustin Pop
      values.append(entry)
2606 ae5849b5 Michael Hanselmann
    return values
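    # Illustrative example (hypothetical): for
    # output_fields=["cluster_name", "drain_flag"] this could return
    #   ["cluster.example.com", False]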
2607 a8083063 Iustin Pop
2608 a8083063 Iustin Pop
2609 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
2610 a8083063 Iustin Pop
  """Bring up an instance's disks.
2611 a8083063 Iustin Pop

2612 a8083063 Iustin Pop
  """
2613 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2614 f22a8ba3 Guido Trotter
  REQ_BGL = False
2615 f22a8ba3 Guido Trotter
2616 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2617 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2618 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2619 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2620 f22a8ba3 Guido Trotter
2621 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2622 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2623 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2624 a8083063 Iustin Pop
2625 a8083063 Iustin Pop
  def CheckPrereq(self):
2626 a8083063 Iustin Pop
    """Check prerequisites.
2627 a8083063 Iustin Pop

2628 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2629 a8083063 Iustin Pop

2630 a8083063 Iustin Pop
    """
2631 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2632 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2633 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2634 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
2635 a8083063 Iustin Pop
2636 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2637 a8083063 Iustin Pop
    """Activate the disks.
2638 a8083063 Iustin Pop

2639 a8083063 Iustin Pop
    """
2640 b9bddb6b Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
2641 a8083063 Iustin Pop
    if not disks_ok:
2642 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
2643 a8083063 Iustin Pop
2644 a8083063 Iustin Pop
    return disks_info
2645 a8083063 Iustin Pop
2646 a8083063 Iustin Pop
2647 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
2648 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
2649 a8083063 Iustin Pop

2650 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
2651 a8083063 Iustin Pop

2652 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
2653 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
2654 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
2655 e4376078 Iustin Pop
  @param instance: the instance for whose disks we assemble
2656 e4376078 Iustin Pop
  @type ignore_secondaries: boolean
2657 e4376078 Iustin Pop
  @param ignore_secondaries: if true, errors on secondary nodes
2658 e4376078 Iustin Pop
      won't result in an error return from the function
2659 e4376078 Iustin Pop
  @return: a tuple of (disks_ok, device_info); device_info is a list of
2660 e4376078 Iustin Pop
      (host, instance_visible_name, node_visible_name)
2661 e4376078 Iustin Pop
      with the mapping from node devices to instance devices
2662 a8083063 Iustin Pop

2663 a8083063 Iustin Pop
  """
2664 a8083063 Iustin Pop
  device_info = []
2665 a8083063 Iustin Pop
  disks_ok = True
2666 fdbd668d Iustin Pop
  iname = instance.name
2667 fdbd668d Iustin Pop
  # With the two-pass mechanism we try to reduce the window of
2668 fdbd668d Iustin Pop
  # opportunity for the race condition of switching DRBD to primary
2669 fdbd668d Iustin Pop
  # before handshaking occurred, but we do not eliminate it
2670 fdbd668d Iustin Pop
2671 fdbd668d Iustin Pop
  # The proper fix would be to wait (with some limits) until the
2672 fdbd668d Iustin Pop
  # connection has been made and drbd transitions from WFConnection
2673 fdbd668d Iustin Pop
  # into any other network-connected state (Connected, SyncTarget,
2674 fdbd668d Iustin Pop
  # SyncSource, etc.)
2675 fdbd668d Iustin Pop
2676 fdbd668d Iustin Pop
  # 1st pass, assemble on all nodes in secondary mode
2677 a8083063 Iustin Pop
  for inst_disk in instance.disks:
2678 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2679 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2680 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
2681 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2682 53c14ef1 Iustin Pop
      if msg:
2683 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2684 53c14ef1 Iustin Pop
                           " (is_primary=False, pass=1): %s",
2685 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
2686 fdbd668d Iustin Pop
        if not ignore_secondaries:
2687 a8083063 Iustin Pop
          disks_ok = False
2688 fdbd668d Iustin Pop
2689 fdbd668d Iustin Pop
  # FIXME: race condition on drbd migration to primary
2690 fdbd668d Iustin Pop
2691 fdbd668d Iustin Pop
  # 2nd pass, do only the primary node
2692 fdbd668d Iustin Pop
  for inst_disk in instance.disks:
2693 fdbd668d Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
2694 fdbd668d Iustin Pop
      if node != instance.primary_node:
2695 fdbd668d Iustin Pop
        continue
2696 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(node_disk, node)
2697 72737a7f Iustin Pop
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
2698 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2699 53c14ef1 Iustin Pop
      if msg:
2700 86d9d3bb Iustin Pop
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
2701 53c14ef1 Iustin Pop
                           " (is_primary=True, pass=2): %s",
2702 53c14ef1 Iustin Pop
                           inst_disk.iv_name, node, msg)
2703 fdbd668d Iustin Pop
        disks_ok = False
2704 1dff8e07 Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name,
2705 1dff8e07 Iustin Pop
                        result.payload))
2706 a8083063 Iustin Pop
2707 b352ab5b Iustin Pop
  # leave the disks configured for the primary node
2708 b352ab5b Iustin Pop
  # this is a workaround that would be fixed better by
2709 b352ab5b Iustin Pop
  # improving the logical/physical id handling
2710 b352ab5b Iustin Pop
  for disk in instance.disks:
2711 b9bddb6b Iustin Pop
    lu.cfg.SetDiskID(disk, instance.primary_node)
2712 b352ab5b Iustin Pop
2713 a8083063 Iustin Pop
  return disks_ok, device_info
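  # Illustrative example (hypothetical values): for a healthy single-disk
  # instance this could return
  #   (True, [("node1.example.com", "disk/0", "/dev/drbd0")])
  # where the last element of each device_info tuple is the node-level
  # device path reported by call_blockdev_assemble.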
2714 a8083063 Iustin Pop
2715 a8083063 Iustin Pop
2716 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
2717 3ecf6786 Iustin Pop
  """Start the disks of an instance.
2718 3ecf6786 Iustin Pop

2719 3ecf6786 Iustin Pop
  """
2720 b9bddb6b Iustin Pop
  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
2721 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
2722 fe7b0351 Michael Hanselmann
  if not disks_ok:
2723 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(lu, instance)
2724 fe7b0351 Michael Hanselmann
    if force is not None and not force:
2725 86d9d3bb Iustin Pop
      lu.proc.LogWarning("", hint="If the message above refers to a"
2726 86d9d3bb Iustin Pop
                         " secondary node,"
2727 86d9d3bb Iustin Pop
                         " you can retry the operation using '--force'.")
2728 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
2729 fe7b0351 Michael Hanselmann
2730 fe7b0351 Michael Hanselmann
2731 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
2732 a8083063 Iustin Pop
  """Shutdown an instance's disks.
2733 a8083063 Iustin Pop

2734 a8083063 Iustin Pop
  """
2735 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2736 f22a8ba3 Guido Trotter
  REQ_BGL = False
2737 f22a8ba3 Guido Trotter
2738 f22a8ba3 Guido Trotter
  def ExpandNames(self):
2739 f22a8ba3 Guido Trotter
    self._ExpandAndLockInstance()
2740 f22a8ba3 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
2741 f22a8ba3 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2742 f22a8ba3 Guido Trotter
2743 f22a8ba3 Guido Trotter
  def DeclareLocks(self, level):
2744 f22a8ba3 Guido Trotter
    if level == locking.LEVEL_NODE:
2745 f22a8ba3 Guido Trotter
      self._LockInstancesNodes()
2746 a8083063 Iustin Pop
2747 a8083063 Iustin Pop
  def CheckPrereq(self):
2748 a8083063 Iustin Pop
    """Check prerequisites.
2749 a8083063 Iustin Pop

2750 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2751 a8083063 Iustin Pop

2752 a8083063 Iustin Pop
    """
2753 f22a8ba3 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2754 f22a8ba3 Guido Trotter
    assert self.instance is not None, \
2755 f22a8ba3 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2756 a8083063 Iustin Pop
2757 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2758 a8083063 Iustin Pop
    """Deactivate the disks
2759 a8083063 Iustin Pop

2760 a8083063 Iustin Pop
    """
2761 a8083063 Iustin Pop
    instance = self.instance
2762 b9bddb6b Iustin Pop
    _SafeShutdownInstanceDisks(self, instance)
2763 a8083063 Iustin Pop
2764 a8083063 Iustin Pop
2765 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
2766 155d6c75 Guido Trotter
  """Shutdown block devices of an instance.
2767 155d6c75 Guido Trotter

2768 155d6c75 Guido Trotter
  This function checks if an instance is running, before calling
2769 155d6c75 Guido Trotter
  _ShutdownInstanceDisks.
2770 155d6c75 Guido Trotter

2771 155d6c75 Guido Trotter
  """
2772 aca13712 Iustin Pop
  pnode = instance.primary_node
2773 4c4e4e1e Iustin Pop
  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
2774 4c4e4e1e Iustin Pop
  ins_l.Raise("Can't contact node %s" % pnode)
2775 aca13712 Iustin Pop
2776 aca13712 Iustin Pop
  if instance.name in ins_l.payload:
2777 155d6c75 Guido Trotter
    raise errors.OpExecError("Instance is running, can't shutdown"
2778 155d6c75 Guido Trotter
                             " block devices.")
2779 155d6c75 Guido Trotter
2780 b9bddb6b Iustin Pop
  _ShutdownInstanceDisks(lu, instance)
2781 a8083063 Iustin Pop
2782 a8083063 Iustin Pop
2783 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
2784 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
2785 a8083063 Iustin Pop

2786 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
2787 a8083063 Iustin Pop

2788 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
2789 a8083063 Iustin Pop
  ignored.
2790 a8083063 Iustin Pop

2791 a8083063 Iustin Pop
  """
2792 cacfd1fd Iustin Pop
  all_result = True
2793 a8083063 Iustin Pop
  for disk in instance.disks:
2794 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
2795 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(top_disk, node)
2796 781de953 Iustin Pop
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
2797 4c4e4e1e Iustin Pop
      msg = result.fail_msg
2798 cacfd1fd Iustin Pop
      if msg:
2799 cacfd1fd Iustin Pop
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
2800 cacfd1fd Iustin Pop
                      disk.iv_name, node, msg)
2801 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
2802 cacfd1fd Iustin Pop
          all_result = False
2803 cacfd1fd Iustin Pop
  return all_result
2804 a8083063 Iustin Pop
2805 a8083063 Iustin Pop
2806 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
2807 d4f16fd9 Iustin Pop
  """Checks if a node has enough free memory.
2808 d4f16fd9 Iustin Pop

2809 d4f16fd9 Iustin Pop
  This function checks if a given node has the needed amount of free
2810 d4f16fd9 Iustin Pop
  memory. In case the node has less memory or we cannot get the
2811 d4f16fd9 Iustin Pop
  information from the node, this function raises an OpPrereqError
2812 d4f16fd9 Iustin Pop
  exception.
2813 d4f16fd9 Iustin Pop

2814 b9bddb6b Iustin Pop
  @type lu: C{LogicalUnit}
2815 b9bddb6b Iustin Pop
  @param lu: a logical unit from which we get configuration data
2816 e69d05fd Iustin Pop
  @type node: C{str}
2817 e69d05fd Iustin Pop
  @param node: the node to check
2818 e69d05fd Iustin Pop
  @type reason: C{str}
2819 e69d05fd Iustin Pop
  @param reason: string to use in the error message
2820 e69d05fd Iustin Pop
  @type requested: C{int}
2821 e69d05fd Iustin Pop
  @param requested: the amount of memory in MiB to check for
2822 9ca87a96 Iustin Pop
  @type hypervisor_name: C{str}
2823 9ca87a96 Iustin Pop
  @param hypervisor_name: the hypervisor to ask for memory stats
2824 e69d05fd Iustin Pop
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
2825 e69d05fd Iustin Pop
      we cannot check the node
2826 d4f16fd9 Iustin Pop

2827 d4f16fd9 Iustin Pop
  """
2828 9ca87a96 Iustin Pop
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
2829 4c4e4e1e Iustin Pop
  nodeinfo[node].Raise("Can't get data from node %s" % node, prereq=True)
2830 070e998b Iustin Pop
  free_mem = nodeinfo[node].payload.get('memory_free', None)
2831 d4f16fd9 Iustin Pop
  if not isinstance(free_mem, int):
2832 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
2833 070e998b Iustin Pop
                               " was '%s'" % (node, free_mem))
2834 d4f16fd9 Iustin Pop
  if requested > free_mem:
2835 d4f16fd9 Iustin Pop
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
2836 070e998b Iustin Pop
                               " needed %s MiB, available %s MiB" %
2837 070e998b Iustin Pop
                               (node, reason, requested, free_mem))
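  # Illustrative usage, mirroring the call in LUStartupInstance below
  # (the node name and values here are hypothetical):
  #   _CheckNodeFreeMemory(self, "node1.example.com",
  #                        "starting instance instance1", 512, "xen-pvm")
  # raises errors.OpPrereqError if the node reports less than 512 MiB of
  # free memory or cannot be queried.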
2838 d4f16fd9 Iustin Pop
2839 d4f16fd9 Iustin Pop
2840 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2841 a8083063 Iustin Pop
  """Starts an instance.
2842 a8083063 Iustin Pop

2843 a8083063 Iustin Pop
  """
2844 a8083063 Iustin Pop
  HPATH = "instance-start"
2845 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2846 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2847 e873317a Guido Trotter
  REQ_BGL = False
2848 e873317a Guido Trotter
2849 e873317a Guido Trotter
  def ExpandNames(self):
2850 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2851 a8083063 Iustin Pop
2852 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2853 a8083063 Iustin Pop
    """Build hooks env.
2854 a8083063 Iustin Pop

2855 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2856 a8083063 Iustin Pop

2857 a8083063 Iustin Pop
    """
2858 a8083063 Iustin Pop
    env = {
2859 a8083063 Iustin Pop
      "FORCE": self.op.force,
2860 a8083063 Iustin Pop
      }
2861 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2862 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2863 a8083063 Iustin Pop
    return env, nl, nl
2864 a8083063 Iustin Pop
2865 a8083063 Iustin Pop
  def CheckPrereq(self):
2866 a8083063 Iustin Pop
    """Check prerequisites.
2867 a8083063 Iustin Pop

2868 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2869 a8083063 Iustin Pop

2870 a8083063 Iustin Pop
    """
2871 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2872 e873317a Guido Trotter
    assert self.instance is not None, \
2873 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2874 a8083063 Iustin Pop
2875 d04aaa2f Iustin Pop
    # extra beparams
2876 d04aaa2f Iustin Pop
    self.beparams = getattr(self.op, "beparams", {})
2877 d04aaa2f Iustin Pop
    if self.beparams:
2878 d04aaa2f Iustin Pop
      if not isinstance(self.beparams, dict):
2879 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
2880 d04aaa2f Iustin Pop
                                   " dict" % (type(self.beparams), ))
2881 d04aaa2f Iustin Pop
      # fill the beparams dict
2882 d04aaa2f Iustin Pop
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
2883 d04aaa2f Iustin Pop
      self.op.beparams = self.beparams
2884 d04aaa2f Iustin Pop
2885 d04aaa2f Iustin Pop
    # extra hvparams
2886 d04aaa2f Iustin Pop
    self.hvparams = getattr(self.op, "hvparams", {})
2887 d04aaa2f Iustin Pop
    if self.hvparams:
2888 d04aaa2f Iustin Pop
      if not isinstance(self.hvparams, dict):
2889 d04aaa2f Iustin Pop
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
2890 d04aaa2f Iustin Pop
                                   " dict" % (type(self.hvparams), ))
2891 d04aaa2f Iustin Pop
2892 d04aaa2f Iustin Pop
      # check hypervisor parameter syntax (locally)
2893 d04aaa2f Iustin Pop
      cluster = self.cfg.GetClusterInfo()
2894 d04aaa2f Iustin Pop
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
2895 abe609b2 Guido Trotter
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
2896 d04aaa2f Iustin Pop
                                    instance.hvparams)
2897 d04aaa2f Iustin Pop
      filled_hvp.update(self.hvparams)
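      # filled_hvp now layers the parameters: cluster-level defaults for
      # this hypervisor, overridden by the instance's own hvparams and, in
      # turn, by the one-off self.hvparams passed with this opcode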
2898 d04aaa2f Iustin Pop
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
2899 d04aaa2f Iustin Pop
      hv_type.CheckParameterSyntax(filled_hvp)
2900 d04aaa2f Iustin Pop
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
2901 d04aaa2f Iustin Pop
      self.op.hvparams = self.hvparams
2902 d04aaa2f Iustin Pop
2903 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
2904 7527a8a4 Iustin Pop
2905 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
2906 a8083063 Iustin Pop
    # check bridges existence
2907 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
2908 a8083063 Iustin Pop
2909 f1926756 Guido Trotter
    remote_info = self.rpc.call_instance_info(instance.primary_node,
2910 f1926756 Guido Trotter
                                              instance.name,
2911 f1926756 Guido Trotter
                                              instance.hypervisor)
2912 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
2913 4c4e4e1e Iustin Pop
                      prereq=True)
2914 7ad1af4a Iustin Pop
    if not remote_info.payload: # not running already
2915 f1926756 Guido Trotter
      _CheckNodeFreeMemory(self, instance.primary_node,
2916 f1926756 Guido Trotter
                           "starting instance %s" % instance.name,
2917 f1926756 Guido Trotter
                           bep[constants.BE_MEMORY], instance.hypervisor)
2918 d4f16fd9 Iustin Pop
2919 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2920 a8083063 Iustin Pop
    """Start the instance.
2921 a8083063 Iustin Pop

2922 a8083063 Iustin Pop
    """
2923 a8083063 Iustin Pop
    instance = self.instance
2924 a8083063 Iustin Pop
    force = self.op.force
2925 a8083063 Iustin Pop
2926 fe482621 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2927 fe482621 Iustin Pop
2928 a8083063 Iustin Pop
    node_current = instance.primary_node
2929 a8083063 Iustin Pop
2930 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, instance, force)
2931 a8083063 Iustin Pop
2932 d04aaa2f Iustin Pop
    result = self.rpc.call_instance_start(node_current, instance,
2933 d04aaa2f Iustin Pop
                                          self.hvparams, self.beparams)
2934 4c4e4e1e Iustin Pop
    msg = result.fail_msg
2935 dd279568 Iustin Pop
    if msg:
2936 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
2937 dd279568 Iustin Pop
      raise errors.OpExecError("Could not start instance: %s" % msg)
2938 a8083063 Iustin Pop
2939 a8083063 Iustin Pop
2940 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
2941 bf6929a2 Alexander Schreiber
  """Reboot an instance.
2942 bf6929a2 Alexander Schreiber

2943 bf6929a2 Alexander Schreiber
  """
2944 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
2945 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
2946 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
2947 e873317a Guido Trotter
  REQ_BGL = False
2948 e873317a Guido Trotter
2949 e873317a Guido Trotter
  def ExpandNames(self):
2950 0fcc5db3 Guido Trotter
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2951 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2952 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL]:
2953 0fcc5db3 Guido Trotter
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2954 0fcc5db3 Guido Trotter
                                  (constants.INSTANCE_REBOOT_SOFT,
2955 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_HARD,
2956 0fcc5db3 Guido Trotter
                                   constants.INSTANCE_REBOOT_FULL))
2957 e873317a Guido Trotter
    self._ExpandAndLockInstance()
2958 bf6929a2 Alexander Schreiber
2959 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
2960 bf6929a2 Alexander Schreiber
    """Build hooks env.
2961 bf6929a2 Alexander Schreiber

2962 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
2963 bf6929a2 Alexander Schreiber

2964 bf6929a2 Alexander Schreiber
    """
2965 bf6929a2 Alexander Schreiber
    env = {
2966 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2967 2c2690c9 Iustin Pop
      "REBOOT_TYPE": self.op.reboot_type,
2968 bf6929a2 Alexander Schreiber
      }
2969 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
2970 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2971 bf6929a2 Alexander Schreiber
    return env, nl, nl
2972 bf6929a2 Alexander Schreiber
2973 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2974 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2975 bf6929a2 Alexander Schreiber

2976 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2977 bf6929a2 Alexander Schreiber

2978 bf6929a2 Alexander Schreiber
    """
2979 e873317a Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2980 e873317a Guido Trotter
    assert self.instance is not None, \
2981 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
2982 bf6929a2 Alexander Schreiber
2983 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
2984 7527a8a4 Iustin Pop
2985 bf6929a2 Alexander Schreiber
    # check bridges existence
2986 b9bddb6b Iustin Pop
    _CheckInstanceBridgesExist(self, instance)
2987 bf6929a2 Alexander Schreiber
2988 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2989 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2990 bf6929a2 Alexander Schreiber

2991 bf6929a2 Alexander Schreiber
    """
2992 bf6929a2 Alexander Schreiber
    instance = self.instance
2993 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2994 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2995 bf6929a2 Alexander Schreiber
2996 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
2997 bf6929a2 Alexander Schreiber
2998 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2999 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
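      # soft and hard reboots are delegated to the hypervisor on the
      # primary node; a full reboot (the else branch below) is emulated by
      # a shutdown, a disk restart and a fresh instance start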
3000 ae48ac32 Iustin Pop
      for disk in instance.disks:
3001 ae48ac32 Iustin Pop
        self.cfg.SetDiskID(disk, node_current)
3002 781de953 Iustin Pop
      result = self.rpc.call_instance_reboot(node_current, instance,
3003 07813a9e Iustin Pop
                                             reboot_type)
3004 4c4e4e1e Iustin Pop
      result.Raise("Could not reboot instance")
3005 bf6929a2 Alexander Schreiber
    else:
3006 1fae010f Iustin Pop
      result = self.rpc.call_instance_shutdown(node_current, instance)
3007 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance for full reboot")
3008 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, instance)
3009 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, ignore_secondaries)
3010 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(node_current, instance, None, None)
3011 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3012 dd279568 Iustin Pop
      if msg:
3013 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3014 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance for"
3015 dd279568 Iustin Pop
                                 " full reboot: %s" % msg)
3016 bf6929a2 Alexander Schreiber
3017 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
3018 bf6929a2 Alexander Schreiber
3019 bf6929a2 Alexander Schreiber
3020 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
3021 a8083063 Iustin Pop
  """Shutdown an instance.
3022 a8083063 Iustin Pop

3023 a8083063 Iustin Pop
  """
3024 a8083063 Iustin Pop
  HPATH = "instance-stop"
3025 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3026 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3027 e873317a Guido Trotter
  REQ_BGL = False
3028 e873317a Guido Trotter
3029 e873317a Guido Trotter
  def ExpandNames(self):
3030 e873317a Guido Trotter
    self._ExpandAndLockInstance()
3031 a8083063 Iustin Pop
3032 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3033 a8083063 Iustin Pop
    """Build hooks env.
3034 a8083063 Iustin Pop

3035 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3036 a8083063 Iustin Pop

3037 a8083063 Iustin Pop
    """
3038 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3039 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3040 a8083063 Iustin Pop
    return env, nl, nl
3041 a8083063 Iustin Pop
3042 a8083063 Iustin Pop
  def CheckPrereq(self):
3043 a8083063 Iustin Pop
    """Check prerequisites.
3044 a8083063 Iustin Pop

3045 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3046 a8083063 Iustin Pop

3047 a8083063 Iustin Pop
    """
3048 e873317a Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3049 e873317a Guido Trotter
    assert self.instance is not None, \
3050 e873317a Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3051 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
3052 a8083063 Iustin Pop
3053 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3054 a8083063 Iustin Pop
    """Shutdown the instance.
3055 a8083063 Iustin Pop

3056 a8083063 Iustin Pop
    """
3057 a8083063 Iustin Pop
    instance = self.instance
3058 a8083063 Iustin Pop
    node_current = instance.primary_node
3059 fe482621 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
3060 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(node_current, instance)
3061 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3062 1fae010f Iustin Pop
    if msg:
3063 1fae010f Iustin Pop
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)
3064 a8083063 Iustin Pop
3065 b9bddb6b Iustin Pop
    _ShutdownInstanceDisks(self, instance)
3066 a8083063 Iustin Pop
3067 a8083063 Iustin Pop
3068 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
3069 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
3070 fe7b0351 Michael Hanselmann

3071 fe7b0351 Michael Hanselmann
  """
3072 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
3073 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
3074 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
3075 4e0b4d2d Guido Trotter
  REQ_BGL = False
3076 4e0b4d2d Guido Trotter
3077 4e0b4d2d Guido Trotter
  def ExpandNames(self):
3078 4e0b4d2d Guido Trotter
    self._ExpandAndLockInstance()
3079 fe7b0351 Michael Hanselmann
3080 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
3081 fe7b0351 Michael Hanselmann
    """Build hooks env.
3082 fe7b0351 Michael Hanselmann

3083 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
3084 fe7b0351 Michael Hanselmann

3085 fe7b0351 Michael Hanselmann
    """
3086 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3087 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3088 fe7b0351 Michael Hanselmann
    return env, nl, nl
3089 fe7b0351 Michael Hanselmann
3090 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
3091 fe7b0351 Michael Hanselmann
    """Check prerequisites.
3092 fe7b0351 Michael Hanselmann

3093 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
3094 fe7b0351 Michael Hanselmann

3095 fe7b0351 Michael Hanselmann
    """
3096 4e0b4d2d Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3097 4e0b4d2d Guido Trotter
    assert instance is not None, \
3098 4e0b4d2d Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3099 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3100 4e0b4d2d Guido Trotter
3101 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
3102 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
3103 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3104 0d68c45d Iustin Pop
    if instance.admin_up:
3105 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3106 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3107 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3108 72737a7f Iustin Pop
                                              instance.name,
3109 72737a7f Iustin Pop
                                              instance.hypervisor)
3110 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3111 4c4e4e1e Iustin Pop
                      prereq=True)
3112 7ad1af4a Iustin Pop
    if remote_info.payload:
3113 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3114 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
3115 3ecf6786 Iustin Pop
                                  instance.primary_node))
3116 d0834de3 Michael Hanselmann
3117 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
3118 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3119 d0834de3 Michael Hanselmann
      # OS verification
3120 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
3121 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
3122 d0834de3 Michael Hanselmann
      if pnode is None:
3123 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
3124 3ecf6786 Iustin Pop
                                   instance.primary_node)
3125 781de953 Iustin Pop
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
3126 4c4e4e1e Iustin Pop
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
3127 4c4e4e1e Iustin Pop
                   (self.op.os_type, pnode.name), prereq=True)
3128 d0834de3 Michael Hanselmann
3129 fe7b0351 Michael Hanselmann
    self.instance = instance
3130 fe7b0351 Michael Hanselmann
3131 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
3132 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
3133 fe7b0351 Michael Hanselmann

3134 fe7b0351 Michael Hanselmann
    """
3135 fe7b0351 Michael Hanselmann
    inst = self.instance
3136 fe7b0351 Michael Hanselmann
3137 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
3138 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
3139 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
3140 97abc79f Iustin Pop
      self.cfg.Update(inst)
3141 d0834de3 Michael Hanselmann
3142 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
3143 fe7b0351 Michael Hanselmann
    try:
3144 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
3145 e557bae9 Guido Trotter
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
3146 4c4e4e1e Iustin Pop
      result.Raise("Could not install OS for instance %s on node %s" %
3147 4c4e4e1e Iustin Pop
                   (inst.name, inst.primary_node))
3148 fe7b0351 Michael Hanselmann
    finally:
3149 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
3150 fe7b0351 Michael Hanselmann
3151 fe7b0351 Michael Hanselmann
3152 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
3153 decd5f45 Iustin Pop
  """Rename an instance.
3154 decd5f45 Iustin Pop

3155 decd5f45 Iustin Pop
  """
3156 decd5f45 Iustin Pop
  HPATH = "instance-rename"
3157 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3158 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
3159 decd5f45 Iustin Pop
3160 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
3161 decd5f45 Iustin Pop
    """Build hooks env.
3162 decd5f45 Iustin Pop

3163 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3164 decd5f45 Iustin Pop

3165 decd5f45 Iustin Pop
    """
3166 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3167 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
3168 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
3169 decd5f45 Iustin Pop
    return env, nl, nl
3170 decd5f45 Iustin Pop
3171 decd5f45 Iustin Pop
  def CheckPrereq(self):
3172 decd5f45 Iustin Pop
    """Check prerequisites.
3173 decd5f45 Iustin Pop

3174 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
3175 decd5f45 Iustin Pop

3176 decd5f45 Iustin Pop
    """
3177 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3178 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3179 decd5f45 Iustin Pop
    if instance is None:
3180 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3181 decd5f45 Iustin Pop
                                 self.op.instance_name)
3182 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, instance.primary_node)
3183 7527a8a4 Iustin Pop
3184 0d68c45d Iustin Pop
    if instance.admin_up:
3185 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
3186 decd5f45 Iustin Pop
                                 self.op.instance_name)
3187 72737a7f Iustin Pop
    remote_info = self.rpc.call_instance_info(instance.primary_node,
3188 72737a7f Iustin Pop
                                              instance.name,
3189 72737a7f Iustin Pop
                                              instance.hypervisor)
3190 4c4e4e1e Iustin Pop
    remote_info.Raise("Error checking node %s" % instance.primary_node,
3191 4c4e4e1e Iustin Pop
                      prereq=True)
3192 7ad1af4a Iustin Pop
    if remote_info.payload:
3193 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
3194 decd5f45 Iustin Pop
                                 (self.op.instance_name,
3195 decd5f45 Iustin Pop
                                  instance.primary_node))
3196 decd5f45 Iustin Pop
    self.instance = instance
3197 decd5f45 Iustin Pop
3198 decd5f45 Iustin Pop
    # new name verification
3199 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
3200 decd5f45 Iustin Pop
3201 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
3202 7bde3275 Guido Trotter
    instance_list = self.cfg.GetInstanceList()
3203 7bde3275 Guido Trotter
    if new_name in instance_list:
3204 7bde3275 Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
3205 c09f363f Manuel Franceschini
                                 new_name)
3206 7bde3275 Guido Trotter
3207 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
3208 937f983d Guido Trotter
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
3209 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
3210 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
3211 decd5f45 Iustin Pop
3212 decd5f45 Iustin Pop
3213 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
3214 decd5f45 Iustin Pop
    """Reinstall the instance.
3215 decd5f45 Iustin Pop

3216 decd5f45 Iustin Pop
    """
3217 decd5f45 Iustin Pop
    inst = self.instance
3218 decd5f45 Iustin Pop
    old_name = inst.name
3219 decd5f45 Iustin Pop
3220 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
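      # file-based disks live under a directory derived from the instance
      # name on the primary node; remember the old path so that it can be
      # renamed on the node after the configuration has been updated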
3221 b23c4333 Manuel Franceschini
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3222 b23c4333 Manuel Franceschini
3223 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
3224 74b5913f Guido Trotter
    # Change the instance lock. This is definitely safe while we hold the BGL
3225 cb4e8387 Iustin Pop
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
3226 74b5913f Guido Trotter
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
3227 decd5f45 Iustin Pop
3228 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
3229 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
3230 decd5f45 Iustin Pop
3231 b23c4333 Manuel Franceschini
    if inst.disk_template == constants.DT_FILE:
3232 b23c4333 Manuel Franceschini
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
3233 72737a7f Iustin Pop
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
3234 72737a7f Iustin Pop
                                                     old_file_storage_dir,
3235 72737a7f Iustin Pop
                                                     new_file_storage_dir)
3236 4c4e4e1e Iustin Pop
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
3237 4c4e4e1e Iustin Pop
                   " (but the instance has been renamed in Ganeti)" %
3238 4c4e4e1e Iustin Pop
                   (inst.primary_node, old_file_storage_dir,
3239 4c4e4e1e Iustin Pop
                    new_file_storage_dir))
3240 b23c4333 Manuel Franceschini
3241 b9bddb6b Iustin Pop
    _StartInstanceDisks(self, inst, None)
3242 decd5f45 Iustin Pop
    try:
3243 781de953 Iustin Pop
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
3244 781de953 Iustin Pop
                                                 old_name)
3245 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3246 96841384 Iustin Pop
      if msg:
3247 6291574d Alexander Schreiber
        msg = ("Could not run OS rename script for instance %s on node %s"
3248 96841384 Iustin Pop
               " (but the instance has been renamed in Ganeti): %s" %
3249 96841384 Iustin Pop
               (inst.name, inst.primary_node, msg))
3250 86d9d3bb Iustin Pop
        self.proc.LogWarning(msg)
3251 decd5f45 Iustin Pop
    finally:
3252 b9bddb6b Iustin Pop
      _ShutdownInstanceDisks(self, inst)
3253 decd5f45 Iustin Pop
3254 decd5f45 Iustin Pop
3255 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
3256 a8083063 Iustin Pop
  """Remove an instance.
3257 a8083063 Iustin Pop

3258 a8083063 Iustin Pop
  """
3259 a8083063 Iustin Pop
  HPATH = "instance-remove"
3260 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3261 5c54b832 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_failures"]
3262 cf472233 Guido Trotter
  REQ_BGL = False
3263 cf472233 Guido Trotter
3264 cf472233 Guido Trotter
  def ExpandNames(self):
3265 cf472233 Guido Trotter
    self._ExpandAndLockInstance()
3266 cf472233 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3267 cf472233 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3268 cf472233 Guido Trotter
3269 cf472233 Guido Trotter
  def DeclareLocks(self, level):
3270 cf472233 Guido Trotter
    if level == locking.LEVEL_NODE:
3271 cf472233 Guido Trotter
      self._LockInstancesNodes()
3272 a8083063 Iustin Pop
3273 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3274 a8083063 Iustin Pop
    """Build hooks env.
3275 a8083063 Iustin Pop

3276 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3277 a8083063 Iustin Pop

3278 a8083063 Iustin Pop
    """
3279 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3280 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()]
3281 a8083063 Iustin Pop
    return env, nl, nl
3282 a8083063 Iustin Pop
3283 a8083063 Iustin Pop
  def CheckPrereq(self):
3284 a8083063 Iustin Pop
    """Check prerequisites.
3285 a8083063 Iustin Pop

3286 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3287 a8083063 Iustin Pop

3288 a8083063 Iustin Pop
    """
3289 cf472233 Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3290 cf472233 Guido Trotter
    assert self.instance is not None, \
3291 cf472233 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3292 a8083063 Iustin Pop
3293 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3294 a8083063 Iustin Pop
    """Remove the instance.
3295 a8083063 Iustin Pop

3296 a8083063 Iustin Pop
    """
3297 a8083063 Iustin Pop
    instance = self.instance
3298 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
3299 9a4f63d1 Iustin Pop
                 instance.name, instance.primary_node)
3300 a8083063 Iustin Pop
3301 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
3302 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3303 1fae010f Iustin Pop
    if msg:
3304 1d67656e Iustin Pop
      if self.op.ignore_failures:
3305 1fae010f Iustin Pop
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
3306 1d67656e Iustin Pop
      else:
3307 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
3308 1fae010f Iustin Pop
                                 " node %s: %s" %
3309 1fae010f Iustin Pop
                                 (instance.name, instance.primary_node, msg))
3310 a8083063 Iustin Pop
3311 9a4f63d1 Iustin Pop
    logging.info("Removing block devices for instance %s", instance.name)
3312 a8083063 Iustin Pop
3313 b9bddb6b Iustin Pop
    if not _RemoveDisks(self, instance):
3314 1d67656e Iustin Pop
      if self.op.ignore_failures:
3315 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
3316 1d67656e Iustin Pop
      else:
3317 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
3318 a8083063 Iustin Pop
3319 9a4f63d1 Iustin Pop
    logging.info("Removing instance %s out of cluster config", instance.name)
3320 a8083063 Iustin Pop
3321 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
3322 cf472233 Guido Trotter
    self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
3323 a8083063 Iustin Pop
3324 a8083063 Iustin Pop
3325 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
3326 a8083063 Iustin Pop
  """Logical unit for querying instances.
3327 a8083063 Iustin Pop

3328 a8083063 Iustin Pop
  """
3329 ec79568d Iustin Pop
  _OP_REQP = ["output_fields", "names", "use_locking"]
3330 7eb9d8f7 Guido Trotter
  REQ_BGL = False
3331 a2d2e1a7 Iustin Pop
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
3332 5b460366 Iustin Pop
                                    "admin_state",
3333 a2d2e1a7 Iustin Pop
                                    "disk_template", "ip", "mac", "bridge",
3334 a2d2e1a7 Iustin Pop
                                    "sda_size", "sdb_size", "vcpus", "tags",
3335 a2d2e1a7 Iustin Pop
                                    "network_port", "beparams",
3336 8aec325c Iustin Pop
                                    r"(disk)\.(size)/([0-9]+)",
3337 8aec325c Iustin Pop
                                    r"(disk)\.(sizes)", "disk_usage",
3338 8aec325c Iustin Pop
                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
3339 8aec325c Iustin Pop
                                    r"(nic)\.(macs|ips|bridges)",
3340 8aec325c Iustin Pop
                                    r"(disk|nic)\.(count)",
3341 a2d2e1a7 Iustin Pop
                                    "serial_no", "hypervisor", "hvparams",] +
3342 a2d2e1a7 Iustin Pop
                                  ["hv/%s" % name
3343 a2d2e1a7 Iustin Pop
                                   for name in constants.HVS_PARAMETERS] +
3344 a2d2e1a7 Iustin Pop
                                  ["be/%s" % name
3345 a2d2e1a7 Iustin Pop
                                   for name in constants.BES_PARAMETERS])
3346 a2d2e1a7 Iustin Pop
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")
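  # Examples of accepted output fields (a sketch): plain static fields
  # such as "name" or "be/memory", indexed fields matched by the regexps
  # above such as "disk.size/0" or "nic.mac/1", aggregates such as
  # "disk.count" or "nic.macs", and the dynamic fields "oper_state",
  # "oper_ram" and "status", which require querying the nodes.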
3347 31bf511f Iustin Pop
3348 a8083063 Iustin Pop
3349 7eb9d8f7 Guido Trotter
  def ExpandNames(self):
3350 31bf511f Iustin Pop
    _CheckOutputFields(static=self._FIELDS_STATIC,
3351 31bf511f Iustin Pop
                       dynamic=self._FIELDS_DYNAMIC,
3352 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
3353 a8083063 Iustin Pop
3354 7eb9d8f7 Guido Trotter
    self.needed_locks = {}
3355 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_INSTANCE] = 1
3356 7eb9d8f7 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
3357 7eb9d8f7 Guido Trotter
3358 57a2fb91 Iustin Pop
    if self.op.names:
3359 57a2fb91 Iustin Pop
      self.wanted = _GetWantedInstances(self, self.op.names)
3360 7eb9d8f7 Guido Trotter
    else:
3361 57a2fb91 Iustin Pop
      self.wanted = locking.ALL_SET
3362 7eb9d8f7 Guido Trotter
3363 ec79568d Iustin Pop
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
3364 ec79568d Iustin Pop
    self.do_locking = self.do_node_query and self.op.use_locking
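    # live data for the dynamic fields requires RPC calls to the nodes, so
    # locks are acquired only when such fields were requested and the
    # caller explicitly asked for locking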
3365 57a2fb91 Iustin Pop
    if self.do_locking:
3366 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
3367 57a2fb91 Iustin Pop
      self.needed_locks[locking.LEVEL_NODE] = []
3368 57a2fb91 Iustin Pop
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3369 7eb9d8f7 Guido Trotter
3370 7eb9d8f7 Guido Trotter
  def DeclareLocks(self, level):
3371 57a2fb91 Iustin Pop
    if level == locking.LEVEL_NODE and self.do_locking:
3372 7eb9d8f7 Guido Trotter
      self._LockInstancesNodes()
3373 7eb9d8f7 Guido Trotter
3374 7eb9d8f7 Guido Trotter
  def CheckPrereq(self):
3375 7eb9d8f7 Guido Trotter
    """Check prerequisites.
3376 7eb9d8f7 Guido Trotter

3377 7eb9d8f7 Guido Trotter
    """
3378 57a2fb91 Iustin Pop
    pass
3379 069dcc86 Iustin Pop
3380 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3381 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
3382 a8083063 Iustin Pop

3383 a8083063 Iustin Pop
    """
3384 57a2fb91 Iustin Pop
    all_info = self.cfg.GetAllInstancesInfo()
3385 a7f5dc98 Iustin Pop
    if self.wanted == locking.ALL_SET:
3386 a7f5dc98 Iustin Pop
      # caller didn't specify instance names, so ordering is not important
3387 a7f5dc98 Iustin Pop
      if self.do_locking:
3388 a7f5dc98 Iustin Pop
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
3389 a7f5dc98 Iustin Pop
      else:
3390 a7f5dc98 Iustin Pop
        instance_names = all_info.keys()
3391 a7f5dc98 Iustin Pop
      instance_names = utils.NiceSort(instance_names)
3392 57a2fb91 Iustin Pop
    else:
3393 a7f5dc98 Iustin Pop
      # caller did specify names, so we must keep the ordering
3394 a7f5dc98 Iustin Pop
      if self.do_locking:
3395 a7f5dc98 Iustin Pop
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
3396 a7f5dc98 Iustin Pop
      else:
3397 a7f5dc98 Iustin Pop
        tgt_set = all_info.keys()
3398 a7f5dc98 Iustin Pop
      missing = set(self.wanted).difference(tgt_set)
3399 a7f5dc98 Iustin Pop
      if missing:
3400 a7f5dc98 Iustin Pop
        raise errors.OpExecError("Some instances were removed before"
3401 a7f5dc98 Iustin Pop
                                 " retrieving their data: %s" % missing)
3402 a7f5dc98 Iustin Pop
      instance_names = self.wanted
3403 c1f1cbb2 Iustin Pop
3404 57a2fb91 Iustin Pop
    instance_list = [all_info[iname] for iname in instance_names]
3405 a8083063 Iustin Pop
3406 a8083063 Iustin Pop
    # begin data gathering
3407 a8083063 Iustin Pop
3408 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
3409 e69d05fd Iustin Pop
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
3410 a8083063 Iustin Pop
3411 a8083063 Iustin Pop
    bad_nodes = []
3412 cbfc4681 Iustin Pop
    off_nodes = []
3413 ec79568d Iustin Pop
    if self.do_node_query:
3414 a8083063 Iustin Pop
      live_data = {}
3415 72737a7f Iustin Pop
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
3416 a8083063 Iustin Pop
      for name in nodes:
3417 a8083063 Iustin Pop
        result = node_data[name]
3418 cbfc4681 Iustin Pop
        if result.offline:
3419 cbfc4681 Iustin Pop
          # offline nodes will be in both lists
3420 cbfc4681 Iustin Pop
          off_nodes.append(name)
3421 4c4e4e1e Iustin Pop
        if result.failed or result.fail_msg:
3422 a8083063 Iustin Pop
          bad_nodes.append(name)
3423 781de953 Iustin Pop
        else:
3424 2fa74ef4 Iustin Pop
          if result.payload:
3425 2fa74ef4 Iustin Pop
            live_data.update(result.payload)
3426 2fa74ef4 Iustin Pop
          # else no instance is alive
3427 a8083063 Iustin Pop
    else:
3428 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
3429 a8083063 Iustin Pop
3430 a8083063 Iustin Pop
    # end data gathering
3431 a8083063 Iustin Pop
3432 5018a335 Iustin Pop
    HVPREFIX = "hv/"
3433 338e51e8 Iustin Pop
    BEPREFIX = "be/"
3434 a8083063 Iustin Pop
    output = []
3435 a8083063 Iustin Pop
    for instance in instance_list:
3436 a8083063 Iustin Pop
      iout = []
3437 5018a335 Iustin Pop
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
3438 338e51e8 Iustin Pop
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
3439 a8083063 Iustin Pop
      for field in self.op.output_fields:
3440 71c1af58 Iustin Pop
        st_match = self._FIELDS_STATIC.Matches(field)
3441 a8083063 Iustin Pop
        if field == "name":
3442 a8083063 Iustin Pop
          val = instance.name
3443 a8083063 Iustin Pop
        elif field == "os":
3444 a8083063 Iustin Pop
          val = instance.os
3445 a8083063 Iustin Pop
        elif field == "pnode":
3446 a8083063 Iustin Pop
          val = instance.primary_node
3447 a8083063 Iustin Pop
        elif field == "snodes":
3448 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
3449 a8083063 Iustin Pop
        elif field == "admin_state":
3450 0d68c45d Iustin Pop
          val = instance.admin_up
3451 a8083063 Iustin Pop
        elif field == "oper_state":
3452 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
3453 8a23d2d3 Iustin Pop
            val = None
3454 a8083063 Iustin Pop
          else:
3455 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
3456 d8052456 Iustin Pop
        elif field == "status":
3457 cbfc4681 Iustin Pop
          if instance.primary_node in off_nodes:
3458 cbfc4681 Iustin Pop
            val = "ERROR_nodeoffline"
3459 cbfc4681 Iustin Pop
          elif instance.primary_node in bad_nodes:
3460 d8052456 Iustin Pop
            val = "ERROR_nodedown"
3461 d8052456 Iustin Pop
          else:
3462 d8052456 Iustin Pop
            running = bool(live_data.get(instance.name))
3463 d8052456 Iustin Pop
            if running:
3464 0d68c45d Iustin Pop
              if instance.admin_up:
3465 d8052456 Iustin Pop
                val = "running"
3466 d8052456 Iustin Pop
              else:
3467 d8052456 Iustin Pop
                val = "ERROR_up"
3468 d8052456 Iustin Pop
            else:
3469 0d68c45d Iustin Pop
              if instance.admin_up:
3470 d8052456 Iustin Pop
                val = "ERROR_down"
3471 d8052456 Iustin Pop
              else:
3472 d8052456 Iustin Pop
                val = "ADMIN_down"
3473 a8083063 Iustin Pop
        elif field == "oper_ram":
3474 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
3475 8a23d2d3 Iustin Pop
            val = None
3476 a8083063 Iustin Pop
          elif instance.name in live_data:
3477 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
3478 a8083063 Iustin Pop
          else:
3479 a8083063 Iustin Pop
            val = "-"
3480 a8083063 Iustin Pop
        elif field == "disk_template":
3481 a8083063 Iustin Pop
          val = instance.disk_template
3482 a8083063 Iustin Pop
        elif field == "ip":
3483 a8083063 Iustin Pop
          val = instance.nics[0].ip
3484 a8083063 Iustin Pop
        elif field == "bridge":
3485 a8083063 Iustin Pop
          val = instance.nics[0].bridge
3486 a8083063 Iustin Pop
        elif field == "mac":
3487 a8083063 Iustin Pop
          val = instance.nics[0].mac
3488 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
3489 ad24e046 Iustin Pop
          idx = ord(field[2]) - ord('a')
3490 ad24e046 Iustin Pop
          try:
3491 ad24e046 Iustin Pop
            val = instance.FindDisk(idx).size
3492 ad24e046 Iustin Pop
          except errors.OpPrereqError:
3493 8a23d2d3 Iustin Pop
            val = None
3494 024e157f Iustin Pop
        elif field == "disk_usage": # total disk usage per node
3495 024e157f Iustin Pop
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
3496 024e157f Iustin Pop
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
3497 130a6a6f Iustin Pop
        elif field == "tags":
3498 130a6a6f Iustin Pop
          val = list(instance.GetTags())
3499 38d7239a Iustin Pop
        elif field == "serial_no":
3500 38d7239a Iustin Pop
          val = instance.serial_no
3501 5018a335 Iustin Pop
        elif field == "network_port":
3502 5018a335 Iustin Pop
          val = instance.network_port
3503 338e51e8 Iustin Pop
        elif field == "hypervisor":
3504 338e51e8 Iustin Pop
          val = instance.hypervisor
3505 338e51e8 Iustin Pop
        elif field == "hvparams":
3506 338e51e8 Iustin Pop
          val = i_hv
3507 5018a335 Iustin Pop
        elif (field.startswith(HVPREFIX) and
3508 5018a335 Iustin Pop
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
3509 5018a335 Iustin Pop
          val = i_hv.get(field[len(HVPREFIX):], None)
3510 338e51e8 Iustin Pop
        elif field == "beparams":
3511 338e51e8 Iustin Pop
          val = i_be
3512 338e51e8 Iustin Pop
        elif (field.startswith(BEPREFIX) and
3513 338e51e8 Iustin Pop
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
3514 338e51e8 Iustin Pop
          val = i_be.get(field[len(BEPREFIX):], None)
3515 71c1af58 Iustin Pop
        elif st_match and st_match.groups():
3516 71c1af58 Iustin Pop
          # matches a variable list
3517 71c1af58 Iustin Pop
          st_groups = st_match.groups()
3518 71c1af58 Iustin Pop
          if st_groups and st_groups[0] == "disk":
3519 71c1af58 Iustin Pop
            if st_groups[1] == "count":
3520 71c1af58 Iustin Pop
              val = len(instance.disks)
3521 41a776da Iustin Pop
            elif st_groups[1] == "sizes":
3522 41a776da Iustin Pop
              val = [disk.size for disk in instance.disks]
3523 71c1af58 Iustin Pop
            elif st_groups[1] == "size":
3524 3e0cea06 Iustin Pop
              try:
3525 3e0cea06 Iustin Pop
                val = instance.FindDisk(st_groups[2]).size
3526 3e0cea06 Iustin Pop
              except errors.OpPrereqError:
3527 71c1af58 Iustin Pop
                val = None
3528 71c1af58 Iustin Pop
            else:
3529 71c1af58 Iustin Pop
              assert False, "Unhandled disk parameter"
3530 71c1af58 Iustin Pop
          elif st_groups[0] == "nic":
3531 71c1af58 Iustin Pop
            if st_groups[1] == "count":
3532 71c1af58 Iustin Pop
              val = len(instance.nics)
3533 41a776da Iustin Pop
            elif st_groups[1] == "macs":
3534 41a776da Iustin Pop
              val = [nic.mac for nic in instance.nics]
3535 41a776da Iustin Pop
            elif st_groups[1] == "ips":
3536 41a776da Iustin Pop
              val = [nic.ip for nic in instance.nics]
3537 41a776da Iustin Pop
            elif st_groups[1] == "bridges":
3538 41a776da Iustin Pop
              val = [nic.bridge for nic in instance.nics]
3539 71c1af58 Iustin Pop
            else:
3540 71c1af58 Iustin Pop
              # index-based item
3541 71c1af58 Iustin Pop
              nic_idx = int(st_groups[2])
3542 71c1af58 Iustin Pop
              if nic_idx >= len(instance.nics):
3543 71c1af58 Iustin Pop
                val = None
3544 71c1af58 Iustin Pop
              else:
3545 71c1af58 Iustin Pop
                if st_groups[1] == "mac":
3546 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].mac
3547 71c1af58 Iustin Pop
                elif st_groups[1] == "ip":
3548 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].ip
3549 71c1af58 Iustin Pop
                elif st_groups[1] == "bridge":
3550 71c1af58 Iustin Pop
                  val = instance.nics[nic_idx].bridge
3551 71c1af58 Iustin Pop
                else:
3552 71c1af58 Iustin Pop
                  assert False, "Unhandled NIC parameter"
3553 71c1af58 Iustin Pop
          else:
3554 71c1af58 Iustin Pop
            assert False, "Unhandled variable parameter"
3555 a8083063 Iustin Pop
        else:
3556 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
3557 a8083063 Iustin Pop
        iout.append(val)
3558 a8083063 Iustin Pop
      output.append(iout)
3559 a8083063 Iustin Pop
3560 a8083063 Iustin Pop
    return output
3561 a8083063 Iustin Pop
3562 a8083063 Iustin Pop
3563 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
3564 a8083063 Iustin Pop
  """Failover an instance.
3565 a8083063 Iustin Pop

3566 a8083063 Iustin Pop
  """
3567 a8083063 Iustin Pop
  HPATH = "instance-failover"
3568 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3569 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
3570 c9e5c064 Guido Trotter
  REQ_BGL = False
3571 c9e5c064 Guido Trotter
3572 c9e5c064 Guido Trotter
  def ExpandNames(self):
3573 c9e5c064 Guido Trotter
    self._ExpandAndLockInstance()
3574 c9e5c064 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
3575 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3576 c9e5c064 Guido Trotter
3577 c9e5c064 Guido Trotter
  def DeclareLocks(self, level):
3578 c9e5c064 Guido Trotter
    if level == locking.LEVEL_NODE:
3579 c9e5c064 Guido Trotter
      self._LockInstancesNodes()
3580 a8083063 Iustin Pop
3581 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3582 a8083063 Iustin Pop
    """Build hooks env.
3583 a8083063 Iustin Pop

3584 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3585 a8083063 Iustin Pop

3586 a8083063 Iustin Pop
    """
3587 a8083063 Iustin Pop
    env = {
3588 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
3589 a8083063 Iustin Pop
      }
3590 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
3591 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3592 a8083063 Iustin Pop
    return env, nl, nl
3593 a8083063 Iustin Pop
3594 a8083063 Iustin Pop
  def CheckPrereq(self):
3595 a8083063 Iustin Pop
    """Check prerequisites.
3596 a8083063 Iustin Pop

3597 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3598 a8083063 Iustin Pop

3599 a8083063 Iustin Pop
    """
3600 c9e5c064 Guido Trotter
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3601 c9e5c064 Guido Trotter
    assert self.instance is not None, \
3602 c9e5c064 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
3603 a8083063 Iustin Pop
3604 338e51e8 Iustin Pop
    bep = self.cfg.GetClusterInfo().FillBE(instance)
3605 a1f445d3 Iustin Pop
    if instance.disk_template not in constants.DTS_NET_MIRROR:
3606 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
3607 a1f445d3 Iustin Pop
                                 " network mirrored, cannot failover.")
3608 2a710df1 Michael Hanselmann
3609 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
3610 2a710df1 Michael Hanselmann
    if not secondary_nodes:
3611 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
3612 abdf0113 Iustin Pop
                                   "a mirrored disk template")
3613 2a710df1 Michael Hanselmann
3614 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
3615 7527a8a4 Iustin Pop
    _CheckNodeOnline(self, target_node)
3616 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, target_node)
3617 d4f16fd9 Iustin Pop
    # check memory requirements on the secondary node
3618 b9bddb6b Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
3619 338e51e8 Iustin Pop
                         instance.name, bep[constants.BE_MEMORY],
3620 e69d05fd Iustin Pop
                         instance.hypervisor)
3621 a8083063 Iustin Pop
    # check bridge existence
3622 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
3623 a8083063 Iustin Pop
3624 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3625 a8083063 Iustin Pop
    """Failover an instance.
3626 a8083063 Iustin Pop

3627 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
3628 a8083063 Iustin Pop
    starting it on the secondary.
3629 a8083063 Iustin Pop

3630 a8083063 Iustin Pop
    """
3631 a8083063 Iustin Pop
    instance = self.instance
3632 a8083063 Iustin Pop
3633 a8083063 Iustin Pop
    source_node = instance.primary_node
3634 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
3635 a8083063 Iustin Pop
3636 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
3637 a8083063 Iustin Pop
    for dev in instance.disks:
3638 abdf0113 Iustin Pop
      # for drbd, these are drbd over lvm
3639 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
3640 0d68c45d Iustin Pop
        if instance.admin_up and not self.op.ignore_consistency:
3641 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
3642 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
3643 a8083063 Iustin Pop
3644 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
3645 9a4f63d1 Iustin Pop
    logging.info("Shutting down instance %s on node %s",
3646 9a4f63d1 Iustin Pop
                 instance.name, source_node)
3647 a8083063 Iustin Pop
3648 781de953 Iustin Pop
    result = self.rpc.call_instance_shutdown(source_node, instance)
3649 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3650 1fae010f Iustin Pop
    if msg:
3651 24a40d57 Iustin Pop
      if self.op.ignore_consistency:
3652 86d9d3bb Iustin Pop
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
3653 1fae010f Iustin Pop
                             " Proceeding anyway. Please make sure node"
3654 1fae010f Iustin Pop
                             " %s is down. Error details: %s",
3655 1fae010f Iustin Pop
                             instance.name, source_node, source_node, msg)
3656 24a40d57 Iustin Pop
      else:
3657 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
3658 1fae010f Iustin Pop
                                 " node %s: %s" %
3659 1fae010f Iustin Pop
                                 (instance.name, source_node, msg))
3660 a8083063 Iustin Pop
3661 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
3662 b9bddb6b Iustin Pop
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
3663 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
3664 a8083063 Iustin Pop
3665 a8083063 Iustin Pop
    instance.primary_node = target_node
3666 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
3667 b6102dab Guido Trotter
    self.cfg.Update(instance)
3668 a8083063 Iustin Pop
3669 12a0cfbe Guido Trotter
    # Only start the instance if it's marked as up
3670 0d68c45d Iustin Pop
    if instance.admin_up:
3671 12a0cfbe Guido Trotter
      feedback_fn("* activating the instance's disks on target node")
3672 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s",
3673 9a4f63d1 Iustin Pop
                   instance.name, target_node)
3674 12a0cfbe Guido Trotter
3675 b9bddb6b Iustin Pop
      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
3676 12a0cfbe Guido Trotter
                                               ignore_secondaries=True)
3677 12a0cfbe Guido Trotter
      if not disks_ok:
3678 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3679 12a0cfbe Guido Trotter
        raise errors.OpExecError("Can't activate the instance's disks")
3680 a8083063 Iustin Pop
3681 12a0cfbe Guido Trotter
      feedback_fn("* starting the instance on the target node")
3682 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(target_node, instance, None, None)
3683 4c4e4e1e Iustin Pop
      msg = result.fail_msg
3684 dd279568 Iustin Pop
      if msg:
3685 b9bddb6b Iustin Pop
        _ShutdownInstanceDisks(self, instance)
3686 dd279568 Iustin Pop
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
3687 dd279568 Iustin Pop
                                 (instance.name, target_node, msg))
3688 a8083063 Iustin Pop
3689 a8083063 Iustin Pop
3690 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
3691 53c776b5 Iustin Pop
  """Migrate an instance.
3692 53c776b5 Iustin Pop

3693 53c776b5 Iustin Pop
  This is migration without shutting down the instance, compared to the
3694 53c776b5 Iustin Pop
  failover, which is done with a shutdown.
3695 53c776b5 Iustin Pop

3696 53c776b5 Iustin Pop
  """
3697 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
3698 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3699 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
3700 53c776b5 Iustin Pop
3701 53c776b5 Iustin Pop
  REQ_BGL = False
3702 53c776b5 Iustin Pop
3703 53c776b5 Iustin Pop
  def ExpandNames(self):
3704 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
3705 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
3706 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3707 53c776b5 Iustin Pop
3708 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
3709 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
3710 53c776b5 Iustin Pop
      self._LockInstancesNodes()
3711 53c776b5 Iustin Pop
3712 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
3713 53c776b5 Iustin Pop
    """Build hooks env.
3714 53c776b5 Iustin Pop

3715 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3716 53c776b5 Iustin Pop

3717 53c776b5 Iustin Pop
    """
3718 53c776b5 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3719 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
3720 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
3721 53c776b5 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3722 53c776b5 Iustin Pop
    return env, nl, nl
3723 53c776b5 Iustin Pop
3724 53c776b5 Iustin Pop
  def CheckPrereq(self):
3725 53c776b5 Iustin Pop
    """Check prerequisites.
3726 53c776b5 Iustin Pop

3727 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
3728 53c776b5 Iustin Pop

3729 53c776b5 Iustin Pop
    """
3730 53c776b5 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3731 53c776b5 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3732 53c776b5 Iustin Pop
    if instance is None:
3733 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3734 53c776b5 Iustin Pop
                                 self.op.instance_name)
3735 53c776b5 Iustin Pop
3736 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
3737 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3738 53c776b5 Iustin Pop
                                 " drbd8, cannot migrate.")
3739 53c776b5 Iustin Pop
3740 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
3741 53c776b5 Iustin Pop
    if not secondary_nodes:
3742 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
3743 733a2b6a Iustin Pop
                                      " drbd8 disk template")
3744 53c776b5 Iustin Pop
3745 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
3746 53c776b5 Iustin Pop
3747 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
3748 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
3749 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
3750 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
3751 53c776b5 Iustin Pop
                         instance.hypervisor)
3752 53c776b5 Iustin Pop
3753 53c776b5 Iustin Pop
    # check bridge existance
3754 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
3755 53c776b5 Iustin Pop
3756 53c776b5 Iustin Pop
    if not self.op.cleanup:
3757 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
3758 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
3759 53c776b5 Iustin Pop
                                                 instance)
3760 4c4e4e1e Iustin Pop
      result.Raise("Can't migrate, please use failover", prereq=True)
3761 53c776b5 Iustin Pop
3762 53c776b5 Iustin Pop
    self.instance = instance
3763 53c776b5 Iustin Pop
3764 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
3765 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
3766 53c776b5 Iustin Pop

3767 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
3768 53c776b5 Iustin Pop

3769 53c776b5 Iustin Pop
    """
3770 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
3771 53c776b5 Iustin Pop
    all_done = False
3772 53c776b5 Iustin Pop
    while not all_done:
3773 53c776b5 Iustin Pop
      all_done = True
3774 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
3775 53c776b5 Iustin Pop
                                            self.nodes_ip,
3776 53c776b5 Iustin Pop
                                            self.instance.disks)
3777 53c776b5 Iustin Pop
      min_percent = 100
3778 53c776b5 Iustin Pop
      for node, nres in result.items():
3779 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
3780 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
3781 53c776b5 Iustin Pop
        all_done = all_done and node_done
3782 53c776b5 Iustin Pop
        if node_percent is not None:
3783 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
3784 53c776b5 Iustin Pop
      if not all_done:
3785 53c776b5 Iustin Pop
        if min_percent < 100:
3786 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
3787 53c776b5 Iustin Pop
        time.sleep(2)
3788 53c776b5 Iustin Pop
3789 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
3790 53c776b5 Iustin Pop
    """Demote a node to secondary.
3791 53c776b5 Iustin Pop

3792 53c776b5 Iustin Pop
    """
3793 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
3794 53c776b5 Iustin Pop
3795 53c776b5 Iustin Pop
    for dev in self.instance.disks:
3796 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
3797 53c776b5 Iustin Pop
3798 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
3799 53c776b5 Iustin Pop
                                          self.instance.disks)
3800 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
3801 53c776b5 Iustin Pop
3802 53c776b5 Iustin Pop
  def _GoStandalone(self):
3803 53c776b5 Iustin Pop
    """Disconnect from the network.
3804 53c776b5 Iustin Pop

3805 53c776b5 Iustin Pop
    """
3806 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
3807 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
3808 53c776b5 Iustin Pop
                                               self.instance.disks)
3809 53c776b5 Iustin Pop
    for node, nres in result.items():
3810 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
3811 53c776b5 Iustin Pop
3812 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
3813 53c776b5 Iustin Pop
    """Reconnect to the network.
3814 53c776b5 Iustin Pop

3815 53c776b5 Iustin Pop
    """
3816 53c776b5 Iustin Pop
    if multimaster:
3817 53c776b5 Iustin Pop
      msg = "dual-master"
3818 53c776b5 Iustin Pop
    else:
3819 53c776b5 Iustin Pop
      msg = "single-master"
3820 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
3821 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
3822 53c776b5 Iustin Pop
                                           self.instance.disks,
3823 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
3824 53c776b5 Iustin Pop
    for node, nres in result.items():
3825 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
3826 53c776b5 Iustin Pop
3827 53c776b5 Iustin Pop
  def _ExecCleanup(self):
3828 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
3829 53c776b5 Iustin Pop

3830 53c776b5 Iustin Pop
    The cleanup is done by:
3831 53c776b5 Iustin Pop
      - check that the instance is running only on one node
3832 53c776b5 Iustin Pop
        (and update the config if needed)
3833 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
3834 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
3835 53c776b5 Iustin Pop
      - disconnect from the network
3836 53c776b5 Iustin Pop
      - change disks into single-master mode
3837 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
3838 53c776b5 Iustin Pop

3839 53c776b5 Iustin Pop
    """
3840 53c776b5 Iustin Pop
    instance = self.instance
3841 53c776b5 Iustin Pop
    target_node = self.target_node
3842 53c776b5 Iustin Pop
    source_node = self.source_node
3843 53c776b5 Iustin Pop
3844 53c776b5 Iustin Pop
    # check running on only one node
3845 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
3846 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
3847 53c776b5 Iustin Pop
                     " a bad state)")
3848 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
3849 53c776b5 Iustin Pop
    for node, result in ins_l.items():
3850 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
3851 53c776b5 Iustin Pop
3852 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
3853 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
3854 53c776b5 Iustin Pop
3855 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
3856 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
3857 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
3858 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
3859 53c776b5 Iustin Pop
                               " and restart this operation.")
3860 53c776b5 Iustin Pop
3861 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
3862 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
3863 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
3864 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
3865 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
3866 53c776b5 Iustin Pop
3867 53c776b5 Iustin Pop
    if runningon_target:
3868 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
3869 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
3870 53c776b5 Iustin Pop
                       " updating config" % target_node)
3871 53c776b5 Iustin Pop
      instance.primary_node = target_node
3872 53c776b5 Iustin Pop
      self.cfg.Update(instance)
3873 53c776b5 Iustin Pop
      demoted_node = source_node
3874 53c776b5 Iustin Pop
    else:
3875 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
3876 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
3877 53c776b5 Iustin Pop
      demoted_node = target_node
3878 53c776b5 Iustin Pop
3879 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
3880 53c776b5 Iustin Pop
    try:
3881 53c776b5 Iustin Pop
      self._WaitUntilSync()
3882 53c776b5 Iustin Pop
    except errors.OpExecError:
3883 53c776b5 Iustin Pop
      # we ignore errors here, since if the device is standalone, it
3884 53c776b5 Iustin Pop
      # won't be able to sync
3885 53c776b5 Iustin Pop
      pass
3886 53c776b5 Iustin Pop
    self._GoStandalone()
3887 53c776b5 Iustin Pop
    self._GoReconnect(False)
3888 53c776b5 Iustin Pop
    self._WaitUntilSync()
3889 53c776b5 Iustin Pop
3890 53c776b5 Iustin Pop
    self.feedback_fn("* done")
3891 53c776b5 Iustin Pop
3892 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
3893 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
3894 6906a9d8 Guido Trotter

3895 6906a9d8 Guido Trotter
    """
3896 6906a9d8 Guido Trotter
    target_node = self.target_node
3897 6906a9d8 Guido Trotter
    try:
3898 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
3899 6906a9d8 Guido Trotter
      self._GoStandalone()
3900 6906a9d8 Guido Trotter
      self._GoReconnect(False)
3901 6906a9d8 Guido Trotter
      self._WaitUntilSync()
3902 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
3903 6906a9d8 Guido Trotter
      self.LogWarning("Migration failed and I can't reconnect the"
3904 6906a9d8 Guido Trotter
                      " drives: error '%s'\n"
3905 6906a9d8 Guido Trotter
                      "Please look and recover the instance status" %
3906 6906a9d8 Guido Trotter
                      str(err))
3907 6906a9d8 Guido Trotter
3908 6906a9d8 Guido Trotter
  def _AbortMigration(self):
3909 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
3910 6906a9d8 Guido Trotter

3911 6906a9d8 Guido Trotter
    """
3912 6906a9d8 Guido Trotter
    instance = self.instance
3913 6906a9d8 Guido Trotter
    target_node = self.target_node
3914 6906a9d8 Guido Trotter
    migration_info = self.migration_info
3915 6906a9d8 Guido Trotter
3916 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
3917 6906a9d8 Guido Trotter
                                                    instance,
3918 6906a9d8 Guido Trotter
                                                    migration_info,
3919 6906a9d8 Guido Trotter
                                                    False)
3920 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
3921 6906a9d8 Guido Trotter
    if abort_msg:
3922 6906a9d8 Guido Trotter
      logging.error("Aborting migration failed on target node %s: %s" %
3923 6906a9d8 Guido Trotter
                    (target_node, abort_msg))
3924 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we still have to try to revert the
3925 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
3926 6906a9d8 Guido Trotter
3927 53c776b5 Iustin Pop
  def _ExecMigration(self):
3928 53c776b5 Iustin Pop
    """Migrate an instance.
3929 53c776b5 Iustin Pop

3930 53c776b5 Iustin Pop
    The migrate is done by:
3931 53c776b5 Iustin Pop
      - change the disks into dual-master mode
3932 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
3933 53c776b5 Iustin Pop
      - migrate the instance
3934 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
3935 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
3936 53c776b5 Iustin Pop
      - change disks into single-master mode
3937 53c776b5 Iustin Pop

3938 53c776b5 Iustin Pop
    """
3939 53c776b5 Iustin Pop
    instance = self.instance
3940 53c776b5 Iustin Pop
    target_node = self.target_node
3941 53c776b5 Iustin Pop
    source_node = self.source_node
3942 53c776b5 Iustin Pop
3943 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
3944 53c776b5 Iustin Pop
    for dev in instance.disks:
3945 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
3946 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
3947 53c776b5 Iustin Pop
                                 " synchronized on target node,"
3948 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
3949 53c776b5 Iustin Pop
3950 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
3951 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
3952 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3953 6906a9d8 Guido Trotter
    if msg:
3954 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
3955 0959c824 Iustin Pop
                 (source_node, msg))
3956 6906a9d8 Guido Trotter
      logging.error(log_err)
3957 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
3958 6906a9d8 Guido Trotter
3959 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
3960 6906a9d8 Guido Trotter
3961 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
3962 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
3963 53c776b5 Iustin Pop
    self._GoStandalone()
3964 53c776b5 Iustin Pop
    self._GoReconnect(True)
3965 53c776b5 Iustin Pop
    self._WaitUntilSync()
3966 53c776b5 Iustin Pop
3967 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
3968 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
3969 6906a9d8 Guido Trotter
                                           instance,
3970 6906a9d8 Guido Trotter
                                           migration_info,
3971 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
3972 6906a9d8 Guido Trotter
3973 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3974 6906a9d8 Guido Trotter
    if msg:
3975 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
3976 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
3977 6906a9d8 Guido Trotter
      self._AbortMigration()
3978 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
3979 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
3980 6906a9d8 Guido Trotter
                               (instance.name, msg))
3981 6906a9d8 Guido Trotter
3982 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
3983 53c776b5 Iustin Pop
    time.sleep(10)
3984 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
3985 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
3986 53c776b5 Iustin Pop
                                            self.op.live)
3987 4c4e4e1e Iustin Pop
    msg = result.fail_msg
3988 53c776b5 Iustin Pop
    if msg:
3989 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
3990 53c776b5 Iustin Pop
                    " disk status: %s", msg)
3991 6906a9d8 Guido Trotter
      self._AbortMigration()
3992 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
3993 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
3994 53c776b5 Iustin Pop
                               (instance.name, msg))
3995 53c776b5 Iustin Pop
    time.sleep(10)
3996 53c776b5 Iustin Pop
3997 53c776b5 Iustin Pop
    instance.primary_node = target_node
3998 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
3999 53c776b5 Iustin Pop
    self.cfg.Update(instance)
4000 53c776b5 Iustin Pop
4001 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
4002 6906a9d8 Guido Trotter
                                              instance,
4003 6906a9d8 Guido Trotter
                                              migration_info,
4004 6906a9d8 Guido Trotter
                                              True)
4005 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4006 6906a9d8 Guido Trotter
    if msg:
4007 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
4008 6906a9d8 Guido Trotter
                    " %s" % msg)
4009 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
4010 6906a9d8 Guido Trotter
                               msg)
4011 6906a9d8 Guido Trotter
4012 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
4013 53c776b5 Iustin Pop
    self._WaitUntilSync()
4014 53c776b5 Iustin Pop
    self._GoStandalone()
4015 53c776b5 Iustin Pop
    self._GoReconnect(False)
4016 53c776b5 Iustin Pop
    self._WaitUntilSync()
4017 53c776b5 Iustin Pop
4018 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4019 53c776b5 Iustin Pop
4020 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
4021 53c776b5 Iustin Pop
    """Perform the migration.
4022 53c776b5 Iustin Pop

4023 53c776b5 Iustin Pop
    """
4024 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
4025 53c776b5 Iustin Pop
4026 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
4027 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
4028 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
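    # the DRBD endpoints are addressed via the nodes' secondary (replication) IPs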
4029 53c776b5 Iustin Pop
    self.nodes_ip = {
4030 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
4031 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
4032 53c776b5 Iustin Pop
      }
4033 53c776b5 Iustin Pop
    if self.op.cleanup:
4034 53c776b5 Iustin Pop
      return self._ExecCleanup()
4035 53c776b5 Iustin Pop
    else:
4036 53c776b5 Iustin Pop
      return self._ExecMigration()
4037 53c776b5 Iustin Pop
4038 53c776b5 Iustin Pop
4039 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
4040 428958aa Iustin Pop
                    info, force_open):
4041 428958aa Iustin Pop
  """Create a tree of block devices on a given node.
4042 a8083063 Iustin Pop

4043 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
4044 a8083063 Iustin Pop
  all its children.
4045 a8083063 Iustin Pop

4046 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
4047 a8083063 Iustin Pop

4048 428958aa Iustin Pop
  @param lu: the lu on whose behalf we execute
4049 428958aa Iustin Pop
  @param node: the node on which to create the device
4050 428958aa Iustin Pop
  @type instance: L{objects.Instance}
4051 428958aa Iustin Pop
  @param instance: the instance which owns the device
4052 428958aa Iustin Pop
  @type device: L{objects.Disk}
4053 428958aa Iustin Pop
  @param device: the device to create
4054 428958aa Iustin Pop
  @type force_create: boolean
4055 428958aa Iustin Pop
  @param force_create: whether to force creation of this device; this
4056 428958aa Iustin Pop
      will be changed to True whenever we find a device which has the
4057 428958aa Iustin Pop
      CreateOnSecondary() attribute
4058 428958aa Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4059 428958aa Iustin Pop
      (this will be represented as a LVM tag)
4060 428958aa Iustin Pop
  @type force_open: boolean
4061 428958aa Iustin Pop
  @param force_open: this parameter will be passed to the
4062 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4063 428958aa Iustin Pop
      whether we run on primary or not, and it affects both
4064 428958aa Iustin Pop
      the child assembly and the device's own Open() execution
4065 428958aa Iustin Pop

4066 a8083063 Iustin Pop
  """
4067 a8083063 Iustin Pop
  if device.CreateOnSecondary():
4068 428958aa Iustin Pop
    force_create = True
4069 796cab27 Iustin Pop
4070 a8083063 Iustin Pop
  if device.children:
4071 a8083063 Iustin Pop
    for child in device.children:
4072 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, child, force_create,
4073 428958aa Iustin Pop
                      info, force_open)
4074 a8083063 Iustin Pop
4075 428958aa Iustin Pop
  if not force_create:
4076 796cab27 Iustin Pop
    return
4077 796cab27 Iustin Pop
4078 de12473a Iustin Pop
  _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
4079 de12473a Iustin Pop
4080 de12473a Iustin Pop
4081 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
4082 de12473a Iustin Pop
  """Create a single block device on a given node.
4083 de12473a Iustin Pop

4084 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
4085 de12473a Iustin Pop
  created in advance.
4086 de12473a Iustin Pop

4087 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
4088 de12473a Iustin Pop
  @param node: the node on which to create the device
4089 de12473a Iustin Pop
  @type instance: L{objects.Instance}
4090 de12473a Iustin Pop
  @param instance: the instance which owns the device
4091 de12473a Iustin Pop
  @type device: L{objects.Disk}
4092 de12473a Iustin Pop
  @param device: the device to create
4093 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4094 de12473a Iustin Pop
      (this will be represented as a LVM tag)
4095 de12473a Iustin Pop
  @type force_open: boolean
4096 de12473a Iustin Pop
  @param force_open: this parameter will be passed to the
4097 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4098 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
4099 de12473a Iustin Pop
      the child assembly and the device's own Open() execution
4100 de12473a Iustin Pop

4101 de12473a Iustin Pop
  """
4102 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
4103 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
4104 428958aa Iustin Pop
                                       instance.name, force_open, info)
4105 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
4106 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
4107 a8083063 Iustin Pop
  if device.physical_id is None:
4108 0959c824 Iustin Pop
    device.physical_id = result.payload
4109 a8083063 Iustin Pop
4110 a8083063 Iustin Pop
4111 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
4112 923b1523 Iustin Pop
  """Generate a suitable LV name.
4113 923b1523 Iustin Pop

4114 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
4115 923b1523 Iustin Pop

4116 923b1523 Iustin Pop
  """
4117 923b1523 Iustin Pop
  results = []
4118 923b1523 Iustin Pop
  for val in exts:
4119 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
4120 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
4121 923b1523 Iustin Pop
  return results
4122 923b1523 Iustin Pop
4123 923b1523 Iustin Pop
4124 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
4125 ffa1c0dc Iustin Pop
                         p_minor, s_minor):
4126 a1f445d3 Iustin Pop
  """Generate a drbd8 device complete with its children.
4127 a1f445d3 Iustin Pop

4128 a1f445d3 Iustin Pop
  """
4129 b9bddb6b Iustin Pop
  port = lu.cfg.AllocatePort()
4130 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
4131 b9bddb6b Iustin Pop
  shared_secret = lu.cfg.GenerateDRBDSecret()
4132 a1f445d3 Iustin Pop
  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
4133 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[0]))
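  # a small (128 MB) volume holds the DRBD metadata for this disk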
4134 a1f445d3 Iustin Pop
  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
4135 a1f445d3 Iustin Pop
                          logical_id=(vgname, names[1]))
4136 a1f445d3 Iustin Pop
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
4137 ffa1c0dc Iustin Pop
                          logical_id=(primary, secondary, port,
4138 f9518d38 Iustin Pop
                                      p_minor, s_minor,
4139 f9518d38 Iustin Pop
                                      shared_secret),
4140 ffa1c0dc Iustin Pop
                          children=[dev_data, dev_meta],
4141 a1f445d3 Iustin Pop
                          iv_name=iv_name)
4142 a1f445d3 Iustin Pop
  return drbd_dev
4143 a1f445d3 Iustin Pop
4144 7c0d6283 Michael Hanselmann
4145 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
4146 a8083063 Iustin Pop
                          instance_name, primary_node,
4147 08db7c5c Iustin Pop
                          secondary_nodes, disk_info,
4148 e2a65344 Iustin Pop
                          file_storage_dir, file_driver,
4149 e2a65344 Iustin Pop
                          base_index):
4150 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
4151 a8083063 Iustin Pop

4152 a8083063 Iustin Pop
  """
4153 a8083063 Iustin Pop
  #TODO: compute space requirements
4154 a8083063 Iustin Pop
4155 b9bddb6b Iustin Pop
  vgname = lu.cfg.GetVGName()
4156 08db7c5c Iustin Pop
  disk_count = len(disk_info)
4157 08db7c5c Iustin Pop
  disks = []
4158 3517d9b9 Manuel Franceschini
  if template_name == constants.DT_DISKLESS:
4159 08db7c5c Iustin Pop
    pass
4160 3517d9b9 Manuel Franceschini
  elif template_name == constants.DT_PLAIN:
4161 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
4162 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
4163 923b1523 Iustin Pop
4164 08db7c5c Iustin Pop
    names = _GenerateUniqueNames(lu, [".disk%d" % i
4165 08db7c5c Iustin Pop
                                      for i in range(disk_count)])
4166 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
4167 e2a65344 Iustin Pop
      disk_index = idx + base_index
4168 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
4169 08db7c5c Iustin Pop
                              logical_id=(vgname, names[idx]),
4170 6ec66eae Iustin Pop
                              iv_name="disk/%d" % disk_index,
4171 6ec66eae Iustin Pop
                              mode=disk["mode"])
4172 08db7c5c Iustin Pop
      disks.append(disk_dev)
4173 a1f445d3 Iustin Pop
  elif template_name == constants.DT_DRBD8:
4174 a1f445d3 Iustin Pop
    if len(secondary_nodes) != 1:
4175 a1f445d3 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
4176 a1f445d3 Iustin Pop
    remote_node = secondary_nodes[0]
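    # allocate two DRBD minors per disk: one for the primary, one for the secondary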
4177 08db7c5c Iustin Pop
    minors = lu.cfg.AllocateDRBDMinor(
4178 08db7c5c Iustin Pop
      [primary_node, remote_node] * len(disk_info), instance_name)
4179 08db7c5c Iustin Pop
4180 e6c1ff2f Iustin Pop
    names = []
4181 e6c1ff2f Iustin Pop
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
4182 e6c1ff2f Iustin Pop
                                               for i in range(disk_count)]):
4183 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_data")
4184 e6c1ff2f Iustin Pop
      names.append(lv_prefix + "_meta")
4185 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
4186 112050d9 Iustin Pop
      disk_index = idx + base_index
4187 08db7c5c Iustin Pop
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
4188 08db7c5c Iustin Pop
                                      disk["size"], names[idx*2:idx*2+2],
4189 e2a65344 Iustin Pop
                                      "disk/%d" % disk_index,
4190 08db7c5c Iustin Pop
                                      minors[idx*2], minors[idx*2+1])
4191 6ec66eae Iustin Pop
      disk_dev.mode = disk["mode"]
4192 08db7c5c Iustin Pop
      disks.append(disk_dev)
4193 0f1a06e3 Manuel Franceschini
  elif template_name == constants.DT_FILE:
4194 0f1a06e3 Manuel Franceschini
    if len(secondary_nodes) != 0:
4195 0f1a06e3 Manuel Franceschini
      raise errors.ProgrammerError("Wrong template configuration")
4196 0f1a06e3 Manuel Franceschini
4197 08db7c5c Iustin Pop
    for idx, disk in enumerate(disk_info):
4198 112050d9 Iustin Pop
      disk_index = idx + base_index
4199 08db7c5c Iustin Pop
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
4200 e2a65344 Iustin Pop
                              iv_name="disk/%d" % disk_index,
4201 08db7c5c Iustin Pop
                              logical_id=(file_driver,
4202 08db7c5c Iustin Pop
                                          "%s/disk%d" % (file_storage_dir,
4203 43e99cff Guido Trotter
                                                         disk_index)),
4204 6ec66eae Iustin Pop
                              mode=disk["mode"])
4205 08db7c5c Iustin Pop
      disks.append(disk_dev)
4206 a8083063 Iustin Pop
  else:
4207 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
4208 a8083063 Iustin Pop
  return disks
4209 a8083063 Iustin Pop
4210 a8083063 Iustin Pop
4211 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4212 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4213 3ecf6786 Iustin Pop

4214 3ecf6786 Iustin Pop
  """
4215 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
4216 a0c3fea1 Michael Hanselmann
4217 a0c3fea1 Michael Hanselmann
4218 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
4219 a8083063 Iustin Pop
  """Create all disks for an instance.
4220 a8083063 Iustin Pop

4221 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
4222 a8083063 Iustin Pop

4223 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
4224 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
4225 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
4226 e4376078 Iustin Pop
  @param instance: the instance whose disks we should create
4227 e4376078 Iustin Pop
  @rtype: boolean
4228 e4376078 Iustin Pop
  @return: the success of the creation
4229 a8083063 Iustin Pop

4230 a8083063 Iustin Pop
  """
4231 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
4232 428958aa Iustin Pop
  pnode = instance.primary_node
4233 a0c3fea1 Michael Hanselmann
4234 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
4235 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4236 428958aa Iustin Pop
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
4237 0f1a06e3 Manuel Franceschini
4238 4c4e4e1e Iustin Pop
    result.Raise("Failed to create directory '%s' on"
4239 4c4e4e1e Iustin Pop
                 " node %s: %s" % (file_storage_dir, pnode))
4240 0f1a06e3 Manuel Franceschini
4241 24991749 Iustin Pop
  # Note: this needs to be kept in sync with adding of disks in
4242 24991749 Iustin Pop
  # LUSetInstanceParams
4243 a8083063 Iustin Pop
  for device in instance.disks:
4244 9a4f63d1 Iustin Pop
    logging.info("Creating volume %s for instance %s",
4245 9a4f63d1 Iustin Pop
                 device.iv_name, instance.name)
4246 a8083063 Iustin Pop
    #HARDCODE
4247 428958aa Iustin Pop
    for node in instance.all_nodes:
4248 428958aa Iustin Pop
      f_create = node == pnode
4249 428958aa Iustin Pop
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
4250 a8083063 Iustin Pop
4251 a8083063 Iustin Pop
4252 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
4253 a8083063 Iustin Pop
  """Remove all disks for an instance.
4254 a8083063 Iustin Pop

4255 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
4256 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
4257 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
4258 a8083063 Iustin Pop
  with `_CreateDisks()`).
4259 a8083063 Iustin Pop

4260 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
4261 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
4262 e4376078 Iustin Pop
  @type instance: L{objects.Instance}
4263 e4376078 Iustin Pop
  @param instance: the instance whose disks we should remove
4264 e4376078 Iustin Pop
  @rtype: boolean
4265 e4376078 Iustin Pop
  @return: the success of the removal
4266 a8083063 Iustin Pop

4267 a8083063 Iustin Pop
  """
4268 9a4f63d1 Iustin Pop
  logging.info("Removing block devices for instance %s", instance.name)
4269 a8083063 Iustin Pop
4270 e1bc0878 Iustin Pop
  all_result = True
4271 a8083063 Iustin Pop
  for device in instance.disks:
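    # ComputeNodeTree yields (node, device) pairs for this disk and all its children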
4272 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
4273 b9bddb6b Iustin Pop
      lu.cfg.SetDiskID(disk, node)
4274 4c4e4e1e Iustin Pop
      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
4275 e1bc0878 Iustin Pop
      if msg:
4276 e1bc0878 Iustin Pop
        lu.LogWarning("Could not remove block device %s on node %s,"
4277 e1bc0878 Iustin Pop
                      " continuing anyway: %s", device.iv_name, node, msg)
4278 e1bc0878 Iustin Pop
        all_result = False
4279 0f1a06e3 Manuel Franceschini
4280 0f1a06e3 Manuel Franceschini
  if instance.disk_template == constants.DT_FILE:
4281 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
4282 781de953 Iustin Pop
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
4283 781de953 Iustin Pop
                                                 file_storage_dir)
4284 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4285 b2b8bcce Iustin Pop
    if msg:
4286 b2b8bcce Iustin Pop
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
4287 b2b8bcce Iustin Pop
                    file_storage_dir, instance.primary_node, msg)
4288 e1bc0878 Iustin Pop
      all_result = False
4289 0f1a06e3 Manuel Franceschini
4290 e1bc0878 Iustin Pop
  return all_result
4291 a8083063 Iustin Pop
4292 a8083063 Iustin Pop
4293 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
4294 e2fe6369 Iustin Pop
  """Compute disk size requirements in the volume group
4295 e2fe6369 Iustin Pop

4296 e2fe6369 Iustin Pop
  """
4297 e2fe6369 Iustin Pop
  # Required free disk space as a function of the disk template and disk sizes
4298 e2fe6369 Iustin Pop
  req_size_dict = {
4299 e2fe6369 Iustin Pop
    constants.DT_DISKLESS: None,
4300 08db7c5c Iustin Pop
    constants.DT_PLAIN: sum(d["size"] for d in disks),
4301 08db7c5c Iustin Pop
    # 128 MB are added for drbd metadata for each disk
4302 08db7c5c Iustin Pop
    constants.DT_DRBD8: sum(d["size"] + 128 for d in disks),
4303 e2fe6369 Iustin Pop
    constants.DT_FILE: None,
4304 e2fe6369 Iustin Pop
  }
4305 e2fe6369 Iustin Pop
4306 e2fe6369 Iustin Pop
  if disk_template not in req_size_dict:
4307 e2fe6369 Iustin Pop
    raise errors.ProgrammerError("Disk template '%s' size requirement"
4308 e2fe6369 Iustin Pop
                                 " is unknown" %  disk_template)
4309 e2fe6369 Iustin Pop
4310 e2fe6369 Iustin Pop
  return req_size_dict[disk_template]
4311 e2fe6369 Iustin Pop
4312 e2fe6369 Iustin Pop
4313 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
4314 74409b12 Iustin Pop
  """Hypervisor parameter validation.
4315 74409b12 Iustin Pop

4316 74409b12 Iustin Pop
  This function abstracts the hypervisor parameter validation to be
4317 74409b12 Iustin Pop
  used in both instance create and instance modify.
4318 74409b12 Iustin Pop

4319 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
4320 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
4321 74409b12 Iustin Pop
  @type nodenames: list
4322 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
4323 74409b12 Iustin Pop
  @type hvname: string
4324 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
4325 74409b12 Iustin Pop
  @type hvparams: dict
4326 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
4327 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
4328 74409b12 Iustin Pop

4329 74409b12 Iustin Pop
  """
4330 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4331 74409b12 Iustin Pop
                                                  hvname,
4332 74409b12 Iustin Pop
                                                  hvparams)
4333 74409b12 Iustin Pop
  for node in nodenames:
4334 781de953 Iustin Pop
    info = hvinfo[node]
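    # offline nodes cannot run the validation, so skip them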
4335 68c6f21c Iustin Pop
    if info.offline:
4336 68c6f21c Iustin Pop
      continue
4337 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
4338 74409b12 Iustin Pop
4339 74409b12 Iustin Pop
4340 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
4341 a8083063 Iustin Pop
  """Create an instance.
4342 a8083063 Iustin Pop

4343 a8083063 Iustin Pop
  """
4344 a8083063 Iustin Pop
  HPATH = "instance-add"
4345 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4346 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
4347 08db7c5c Iustin Pop
              "mode", "start",
4348 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
4349 338e51e8 Iustin Pop
              "hvparams", "beparams"]
4350 7baf741d Guido Trotter
  REQ_BGL = False
4351 7baf741d Guido Trotter
4352 7baf741d Guido Trotter
  def _ExpandNode(self, node):
4353 7baf741d Guido Trotter
    """Expands and checks one node name.
4354 7baf741d Guido Trotter

4355 7baf741d Guido Trotter
    """
4356 7baf741d Guido Trotter
    node_full = self.cfg.ExpandNodeName(node)
4357 7baf741d Guido Trotter
    if node_full is None:
4358 7baf741d Guido Trotter
      raise errors.OpPrereqError("Unknown node %s" % node)
4359 7baf741d Guido Trotter
    return node_full
4360 7baf741d Guido Trotter
4361 7baf741d Guido Trotter
  def ExpandNames(self):
4362 7baf741d Guido Trotter
    """ExpandNames for CreateInstance.
4363 7baf741d Guido Trotter

4364 7baf741d Guido Trotter
    Figure out the right locks for instance creation.
4365 7baf741d Guido Trotter

4366 7baf741d Guido Trotter
    """
4367 7baf741d Guido Trotter
    self.needed_locks = {}
4368 7baf741d Guido Trotter
4369 7baf741d Guido Trotter
    # set optional parameters to none if they don't exist
4370 6785674e Iustin Pop
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
4371 7baf741d Guido Trotter
      if not hasattr(self.op, attr):
4372 7baf741d Guido Trotter
        setattr(self.op, attr, None)
4373 7baf741d Guido Trotter
4374 4b2f38dd Iustin Pop
    # cheap checks, mostly valid constants given
4375 4b2f38dd Iustin Pop
4376 7baf741d Guido Trotter
    # verify creation mode
4377 7baf741d Guido Trotter
    if self.op.mode not in (constants.INSTANCE_CREATE,
4378 7baf741d Guido Trotter
                            constants.INSTANCE_IMPORT):
4379 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
4380 7baf741d Guido Trotter
                                 self.op.mode)
4381 4b2f38dd Iustin Pop
4382 7baf741d Guido Trotter
    # disk template and mirror node verification
4383 7baf741d Guido Trotter
    if self.op.disk_template not in constants.DISK_TEMPLATES:
4384 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid disk template name")
4385 7baf741d Guido Trotter
4386 4b2f38dd Iustin Pop
    if self.op.hypervisor is None:
4387 4b2f38dd Iustin Pop
      self.op.hypervisor = self.cfg.GetHypervisorType()
4388 4b2f38dd Iustin Pop
4389 8705eb96 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
4390 8705eb96 Iustin Pop
    enabled_hvs = cluster.enabled_hypervisors
4391 4b2f38dd Iustin Pop
    if self.op.hypervisor not in enabled_hvs:
4392 4b2f38dd Iustin Pop
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
4393 4b2f38dd Iustin Pop
                                 " cluster (%s)" % (self.op.hypervisor,
4394 4b2f38dd Iustin Pop
                                  ",".join(enabled_hvs)))
4395 4b2f38dd Iustin Pop
4396 6785674e Iustin Pop
    # check hypervisor parameter syntax (locally)
4397 a5728081 Guido Trotter
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
4398 abe609b2 Guido Trotter
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
4399 8705eb96 Iustin Pop
                                  self.op.hvparams)
4400 6785674e Iustin Pop
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
4401 8705eb96 Iustin Pop
    hv_type.CheckParameterSyntax(filled_hvp)
4402 6785674e Iustin Pop
4403 338e51e8 Iustin Pop
    # fill and remember the beparams dict
4404 a5728081 Guido Trotter
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
4405 4ef7f423 Guido Trotter
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
4406 338e51e8 Iustin Pop
                                    self.op.beparams)
4407 338e51e8 Iustin Pop
4408 7baf741d Guido Trotter
    #### instance parameters check
4409 7baf741d Guido Trotter
4410 7baf741d Guido Trotter
    # instance name verification
4411 7baf741d Guido Trotter
    hostname1 = utils.HostInfo(self.op.instance_name)
4412 7baf741d Guido Trotter
    self.op.instance_name = instance_name = hostname1.name
4413 7baf741d Guido Trotter
4414 7baf741d Guido Trotter
    # this is just a preventive check, but someone might still add this
4415 7baf741d Guido Trotter
    # instance in the meantime, and creation will fail at lock-add time
4416 7baf741d Guido Trotter
    if instance_name in self.cfg.GetInstanceList():
4417 7baf741d Guido Trotter
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
4418 7baf741d Guido Trotter
                                 instance_name)
4419 7baf741d Guido Trotter
4420 7baf741d Guido Trotter
    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
4421 7baf741d Guido Trotter
4422 08db7c5c Iustin Pop
    # NIC buildup
4423 08db7c5c Iustin Pop
    self.nics = []
4424 9dce4771 Guido Trotter
    for idx, nic in enumerate(self.op.nics):
4425 9dce4771 Guido Trotter
      nic_mode_req = nic.get("mode", None)
4426 9dce4771 Guido Trotter
      nic_mode = nic_mode_req
4427 9dce4771 Guido Trotter
      if nic_mode is None:
4428 9dce4771 Guido Trotter
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
4429 9dce4771 Guido Trotter
4430 9dce4771 Guido Trotter
      # in routed mode, for the first nic, the default ip is 'auto'
4431 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
4432 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_AUTO
4433 9dce4771 Guido Trotter
      else:
4434 9dce4771 Guido Trotter
        default_ip_mode = constants.VALUE_NONE
4435 9dce4771 Guido Trotter
4436 08db7c5c Iustin Pop
      # ip validity checks
4437 9dce4771 Guido Trotter
      ip = nic.get("ip", default_ip_mode)
4438 9dce4771 Guido Trotter
      if ip is None or ip.lower() == constants.VALUE_NONE:
4439 08db7c5c Iustin Pop
        nic_ip = None
4440 08db7c5c Iustin Pop
      elif ip.lower() == constants.VALUE_AUTO:
4441 08db7c5c Iustin Pop
        nic_ip = hostname1.ip
4442 08db7c5c Iustin Pop
      else:
4443 08db7c5c Iustin Pop
        if not utils.IsValidIP(ip):
4444 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
4445 08db7c5c Iustin Pop
                                     " like a valid IP" % ip)
4446 08db7c5c Iustin Pop
        nic_ip = ip
4447 08db7c5c Iustin Pop
4448 9dce4771 Guido Trotter
      # TODO: check the ip for uniqueness !!
4449 9dce4771 Guido Trotter
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
4450 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Routed nic mode requires an ip address")
4451 9dce4771 Guido Trotter
4452 08db7c5c Iustin Pop
      # MAC address verification
4453 08db7c5c Iustin Pop
      mac = nic.get("mac", constants.VALUE_AUTO)
4454 08db7c5c Iustin Pop
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4455 08db7c5c Iustin Pop
        if not utils.IsValidMac(mac.lower()):
4456 08db7c5c Iustin Pop
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
4457 08db7c5c Iustin Pop
                                     mac)
4458 08db7c5c Iustin Pop
      # bridge verification
4459 9939547b Iustin Pop
      bridge = nic.get("bridge", None)
4460 9dce4771 Guido Trotter
      link = nic.get("link", None)
4461 9dce4771 Guido Trotter
      if bridge and link:
4462 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link' at the same time")
4463 9dce4771 Guido Trotter
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
4464 9dce4771 Guido Trotter
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
4465 9dce4771 Guido Trotter
      elif bridge:
4466 9dce4771 Guido Trotter
        link = bridge
4467 9dce4771 Guido Trotter
4468 9dce4771 Guido Trotter
      nicparams = {}
4469 9dce4771 Guido Trotter
      if nic_mode_req:
4470 9dce4771 Guido Trotter
        nicparams[constants.NIC_MODE] = nic_mode_req
4471 9dce4771 Guido Trotter
      if link:
4472 9dce4771 Guido Trotter
        nicparams[constants.NIC_LINK] = link
4473 9dce4771 Guido Trotter
4474 9dce4771 Guido Trotter
      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
4475 9dce4771 Guido Trotter
                                      nicparams)
4476 9dce4771 Guido Trotter
      objects.NIC.CheckParameterSyntax(check_params)
4477 9dce4771 Guido Trotter
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
4478 08db7c5c Iustin Pop
4479 08db7c5c Iustin Pop
    # disk checks/pre-build
4480 08db7c5c Iustin Pop
    self.disks = []
4481 08db7c5c Iustin Pop
    for disk in self.op.disks:
4482 08db7c5c Iustin Pop
      mode = disk.get("mode", constants.DISK_RDWR)
4483 08db7c5c Iustin Pop
      if mode not in constants.DISK_ACCESS_SET:
4484 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
4485 08db7c5c Iustin Pop
                                   mode)
4486 08db7c5c Iustin Pop
      size = disk.get("size", None)
4487 08db7c5c Iustin Pop
      if size is None:
4488 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Missing disk size")
4489 08db7c5c Iustin Pop
      try:
4490 08db7c5c Iustin Pop
        size = int(size)
4491 08db7c5c Iustin Pop
      except ValueError:
4492 08db7c5c Iustin Pop
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
4493 08db7c5c Iustin Pop
      self.disks.append({"size": size, "mode": mode})
4494 08db7c5c Iustin Pop
4495 7baf741d Guido Trotter
    # used in CheckPrereq for ip ping check
4496 7baf741d Guido Trotter
    self.check_ip = hostname1.ip
4497 7baf741d Guido Trotter
4498 7baf741d Guido Trotter
    # file storage checks
4499 7baf741d Guido Trotter
    if (self.op.file_driver and
4500 7baf741d Guido Trotter
        not self.op.file_driver in constants.FILE_DRIVER):
4501 7baf741d Guido Trotter
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
4502 7baf741d Guido Trotter
                                 self.op.file_driver)
4503 7baf741d Guido Trotter
4504 7baf741d Guido Trotter
    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
4505 7baf741d Guido Trotter
      raise errors.OpPrereqError("File storage directory path not absolute")
4506 7baf741d Guido Trotter
4507 7baf741d Guido Trotter
    ### Node/iallocator related checks
4508 7baf741d Guido Trotter
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
4509 7baf741d Guido Trotter
      raise errors.OpPrereqError("One and only one of iallocator and primary"
4510 7baf741d Guido Trotter
                                 " node must be given")
4511 7baf741d Guido Trotter
4512 7baf741d Guido Trotter
    if self.op.iallocator:
4513 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4514 7baf741d Guido Trotter
    else:
4515 7baf741d Guido Trotter
      self.op.pnode = self._ExpandNode(self.op.pnode)
4516 7baf741d Guido Trotter
      nodelist = [self.op.pnode]
4517 7baf741d Guido Trotter
      if self.op.snode is not None:
4518 7baf741d Guido Trotter
        self.op.snode = self._ExpandNode(self.op.snode)
4519 7baf741d Guido Trotter
        nodelist.append(self.op.snode)
4520 7baf741d Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = nodelist
4521 7baf741d Guido Trotter
4522 7baf741d Guido Trotter
    # in case of import lock the source node too
4523 7baf741d Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
4524 7baf741d Guido Trotter
      src_node = getattr(self.op, "src_node", None)
4525 7baf741d Guido Trotter
      src_path = getattr(self.op, "src_path", None)
4526 7baf741d Guido Trotter
4527 b9322a9f Guido Trotter
      if src_path is None:
4528 b9322a9f Guido Trotter
        self.op.src_path = src_path = self.op.instance_name
4529 b9322a9f Guido Trotter
4530 b9322a9f Guido Trotter
      if src_node is None:
4531 b9322a9f Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4532 b9322a9f Guido Trotter
        self.op.src_node = None
4533 b9322a9f Guido Trotter
        if os.path.isabs(src_path):
4534 b9322a9f Guido Trotter
          raise errors.OpPrereqError("Importing an instance from an absolute"
4535 b9322a9f Guido Trotter
                                     " path requires a source node option.")
4536 b9322a9f Guido Trotter
      else:
4537 b9322a9f Guido Trotter
        self.op.src_node = src_node = self._ExpandNode(src_node)
4538 b9322a9f Guido Trotter
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
4539 b9322a9f Guido Trotter
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
4540 b9322a9f Guido Trotter
        if not os.path.isabs(src_path):
4541 b9322a9f Guido Trotter
          self.op.src_path = src_path = \
4542 b9322a9f Guido Trotter
            os.path.join(constants.EXPORT_DIR, src_path)
4543 7baf741d Guido Trotter
4544 7baf741d Guido Trotter
    else: # INSTANCE_CREATE
4545 7baf741d Guido Trotter
      if getattr(self.op, "os_type", None) is None:
4546 7baf741d Guido Trotter
        raise errors.OpPrereqError("No guest OS specified")
4547 a8083063 Iustin Pop
4548 538475ca Iustin Pop
  def _RunAllocator(self):
4549 538475ca Iustin Pop
    """Run the allocator based on input opcode.
4550 538475ca Iustin Pop

4551 538475ca Iustin Pop
    """
4552 08db7c5c Iustin Pop
    nics = [n.ToDict() for n in self.nics]
4553 72737a7f Iustin Pop
    ial = IAllocator(self,
4554 29859cb7 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_ALLOC,
4555 d1c2dd75 Iustin Pop
                     name=self.op.instance_name,
4556 d1c2dd75 Iustin Pop
                     disk_template=self.op.disk_template,
4557 d1c2dd75 Iustin Pop
                     tags=[],
4558 d1c2dd75 Iustin Pop
                     os=self.op.os_type,
4559 338e51e8 Iustin Pop
                     vcpus=self.be_full[constants.BE_VCPUS],
4560 338e51e8 Iustin Pop
                     mem_size=self.be_full[constants.BE_MEMORY],
4561 08db7c5c Iustin Pop
                     disks=self.disks,
4562 d1c2dd75 Iustin Pop
                     nics=nics,
4563 8cc7e742 Guido Trotter
                     hypervisor=self.op.hypervisor,
4564 29859cb7 Iustin Pop
                     )
4565 d1c2dd75 Iustin Pop
4566 d1c2dd75 Iustin Pop
    ial.Run(self.op.iallocator)
4567 d1c2dd75 Iustin Pop
4568 d1c2dd75 Iustin Pop
    if not ial.success:
4569 538475ca Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
4570 538475ca Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
4571 d1c2dd75 Iustin Pop
                                                           ial.info))
4572 27579978 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
4573 538475ca Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
4574 538475ca Iustin Pop
                                 " of nodes (%s), required %s" %
4575 97abc79f Iustin Pop
                                 (self.op.iallocator, len(ial.nodes),
4576 1ce4bbe3 Renรฉ Nussbaumer
                                  ial.required_nodes))
4577 d1c2dd75 Iustin Pop
    self.op.pnode = ial.nodes[0]
4578 86d9d3bb Iustin Pop
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
4579 86d9d3bb Iustin Pop
                 self.op.instance_name, self.op.iallocator,
4580 86d9d3bb Iustin Pop
                 ", ".join(ial.nodes))
4581 27579978 Iustin Pop
    if ial.required_nodes == 2:
4582 d1c2dd75 Iustin Pop
      self.op.snode = ial.nodes[1]
4583 538475ca Iustin Pop
4584 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4585 a8083063 Iustin Pop
    """Build hooks env.
4586 a8083063 Iustin Pop

4587 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
4588 a8083063 Iustin Pop

4589 a8083063 Iustin Pop
    """
4590 a8083063 Iustin Pop
    env = {
4591 2c2690c9 Iustin Pop
      "ADD_MODE": self.op.mode,
4592 a8083063 Iustin Pop
      }
4593 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
4594 2c2690c9 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
4595 2c2690c9 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
4596 2c2690c9 Iustin Pop
      env["SRC_IMAGES"] = self.src_images
4597 396e1b78 Michael Hanselmann
4598 2c2690c9 Iustin Pop
    env.update(_BuildInstanceHookEnv(
4599 2c2690c9 Iustin Pop
      name=self.op.instance_name,
4600 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
4601 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
4602 4978db17 Iustin Pop
      status=self.op.start,
4603 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
4604 338e51e8 Iustin Pop
      memory=self.be_full[constants.BE_MEMORY],
4605 338e51e8 Iustin Pop
      vcpus=self.be_full[constants.BE_VCPUS],
4606 62f0dd02 Guido Trotter
      nics=_PreBuildNICHooksList(self, self.nics),
4607 2c2690c9 Iustin Pop
      disk_template=self.op.disk_template,
4608 2c2690c9 Iustin Pop
      disks=[(d["size"], d["mode"]) for d in self.disks],
4609 396e1b78 Michael Hanselmann
    ))
4610 a8083063 Iustin Pop
4611 d6a02168 Michael Hanselmann
    nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
4612 a8083063 Iustin Pop
          self.secondaries)
4613 a8083063 Iustin Pop
    return env, nl, nl
4614 a8083063 Iustin Pop
4615 a8083063 Iustin Pop
4616 a8083063 Iustin Pop
  def CheckPrereq(self):
4617 a8083063 Iustin Pop
    """Check prerequisites.
4618 a8083063 Iustin Pop

4619 a8083063 Iustin Pop
    """
4620 eedc99de Manuel Franceschini
    if (not self.cfg.GetVGName() and
4621 eedc99de Manuel Franceschini
        self.op.disk_template not in constants.DTS_NOT_LVM):
4622 eedc99de Manuel Franceschini
      raise errors.OpPrereqError("Cluster does not support lvm-based"
4623 eedc99de Manuel Franceschini
                                 " instances")
4624 eedc99de Manuel Franceschini
4625 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
4626 7baf741d Guido Trotter
      src_node = self.op.src_node
4627 7baf741d Guido Trotter
      src_path = self.op.src_path
4628 a8083063 Iustin Pop
4629 c0cbdc67 Guido Trotter
      if src_node is None:
4630 1b7bfbb7 Iustin Pop
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
4631 1b7bfbb7 Iustin Pop
        exp_list = self.rpc.call_export_list(locked_nodes)
4632 c0cbdc67 Guido Trotter
        found = False
4633 c0cbdc67 Guido Trotter
        for node in exp_list:
4634 4c4e4e1e Iustin Pop
          if exp_list[node].fail_msg:
4635 1b7bfbb7 Iustin Pop
            continue
4636 1b7bfbb7 Iustin Pop
          if src_path in exp_list[node].payload:
4637 c0cbdc67 Guido Trotter
            found = True
4638 c0cbdc67 Guido Trotter
            self.op.src_node = src_node = node
4639 c0cbdc67 Guido Trotter
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
4640 c0cbdc67 Guido Trotter
                                                       src_path)
4641 c0cbdc67 Guido Trotter
            break
4642 c0cbdc67 Guido Trotter
        if not found:
4643 c0cbdc67 Guido Trotter
          raise errors.OpPrereqError("No export found for relative path %s" %
4644 c0cbdc67 Guido Trotter
                                      src_path)
4645 c0cbdc67 Guido Trotter
4646 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, src_node)
4647 781de953 Iustin Pop
      result = self.rpc.call_export_info(src_node, src_path)
4648 4c4e4e1e Iustin Pop
      result.Raise("No export or invalid export found in dir %s" % src_path)
4649 a8083063 Iustin Pop
4650 3eccac06 Iustin Pop
      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
4651 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
4652 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
4653 a8083063 Iustin Pop
4654 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
4655 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
4656 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
4657 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
4658 a8083063 Iustin Pop
4659 09acf207 Guido Trotter
      # Check that the new instance doesn't have less disks than the export
4660 08db7c5c Iustin Pop
      instance_disks = len(self.disks)
4661 09acf207 Guido Trotter
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
4662 09acf207 Guido Trotter
      if instance_disks < export_disks:
4663 09acf207 Guido Trotter
        raise errors.OpPrereqError("Not enough disks to import."
4664 09acf207 Guido Trotter
                                   " (instance: %d, export: %d)" %
4665 726d7d68 Iustin Pop
                                   (instance_disks, export_disks))
4666 a8083063 Iustin Pop
4667 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
4668 09acf207 Guido Trotter
      disk_images = []
4669 09acf207 Guido Trotter
      for idx in range(export_disks):
4670 09acf207 Guido Trotter
        option = 'disk%d_dump' % idx
4671 09acf207 Guido Trotter
        if export_info.has_option(constants.INISECT_INS, option):
4672 09acf207 Guido Trotter
          # FIXME: are the old os-es, disk sizes, etc. useful?
4673 09acf207 Guido Trotter
          export_name = export_info.get(constants.INISECT_INS, option)
4674 09acf207 Guido Trotter
          image = os.path.join(src_path, export_name)
4675 09acf207 Guido Trotter
          disk_images.append(image)
4676 09acf207 Guido Trotter
        else:
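          # the export contains no dump for this disk, so there is nothing to import for it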
4677 09acf207 Guido Trotter
          disk_images.append(False)
4678 09acf207 Guido Trotter
4679 09acf207 Guido Trotter
      self.src_images = disk_images
4680 901a65c1 Iustin Pop
4681 b4364a6b Guido Trotter
      old_name = export_info.get(constants.INISECT_INS, 'name')
4682 b4364a6b Guido Trotter
      # FIXME: int() here could throw a ValueError on broken exports
4683 b4364a6b Guido Trotter
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
4684 b4364a6b Guido Trotter
      if self.op.instance_name == old_name:
4685 b4364a6b Guido Trotter
        for idx, nic in enumerate(self.nics):
4686 b4364a6b Guido Trotter
          if nic.mac == constants.VALUE_AUTO and exp_nic_count > idx:
4687 b4364a6b Guido Trotter
            nic_mac_ini = 'nic%d_mac' % idx
4688 b4364a6b Guido Trotter
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
4689 bc89efc3 Guido Trotter
4690 295728df Guido Trotter
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
4691 7baf741d Guido Trotter
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
4692 901a65c1 Iustin Pop
    if self.op.start and not self.op.ip_check:
4693 901a65c1 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
4694 901a65c1 Iustin Pop
                                 " adding an instance in start mode")
4695 901a65c1 Iustin Pop
4696 901a65c1 Iustin Pop
    if self.op.ip_check:
4697 7baf741d Guido Trotter
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
4698 901a65c1 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
4699 7b3a8fb5 Iustin Pop
                                   (self.check_ip, self.op.instance_name))
4700 901a65c1 Iustin Pop
4701 295728df Guido Trotter
    #### mac address generation
4702 295728df Guido Trotter
    # By generating here the mac address both the allocator and the hooks get
4703 295728df Guido Trotter
    # the real final mac address rather than the 'auto' or 'generate' value.
4704 295728df Guido Trotter
    # There is a race condition between the generation and the instance object
4705 295728df Guido Trotter
    # creation, which means that we know the mac is valid now, but we're not
4706 295728df Guido Trotter
    # sure it will be when we actually add the instance. If things go bad
4707 295728df Guido Trotter
    # adding the instance will abort because of a duplicate mac, and the
4708 295728df Guido Trotter
    # creation job will fail.
4709 295728df Guido Trotter
    for nic in self.nics:
4710 295728df Guido Trotter
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
4711 295728df Guido Trotter
        nic.mac = self.cfg.GenerateMAC()
4712 295728df Guido Trotter
4713 538475ca Iustin Pop
    #### allocator run
4714 538475ca Iustin Pop
4715 538475ca Iustin Pop
    if self.op.iallocator is not None:
4716 538475ca Iustin Pop
      self._RunAllocator()
4717 0f1a06e3 Manuel Franceschini
4718 901a65c1 Iustin Pop
    #### node related checks
4719 901a65c1 Iustin Pop
4720 901a65c1 Iustin Pop
    # check primary node
4721 7baf741d Guido Trotter
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
4722 7baf741d Guido Trotter
    assert self.pnode is not None, \
4723 7baf741d Guido Trotter
      "Cannot retrieve locked node %s" % self.op.pnode
4724 7527a8a4 Iustin Pop
    if pnode.offline:
4725 7527a8a4 Iustin Pop
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
4726 7527a8a4 Iustin Pop
                                 pnode.name)
4727 733a2b6a Iustin Pop
    if pnode.drained:
4728 733a2b6a Iustin Pop
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
4729 733a2b6a Iustin Pop
                                 pnode.name)
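    # an offline node is not contacted by the cluster at all, while a drained
    # node is still reachable but must not receive new instances; either
    # state disqualifies the node as a primary here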
4730 7527a8a4 Iustin Pop
4731 901a65c1 Iustin Pop
    self.secondaries = []
4732 901a65c1 Iustin Pop
4733 901a65c1 Iustin Pop
    # mirror node verification
4734 a1f445d3 Iustin Pop
    if self.op.disk_template in constants.DTS_NET_MIRROR:
4735 7baf741d Guido Trotter
      if self.op.snode is None:
4736 a1f445d3 Iustin Pop
        raise errors.OpPrereqError("The networked disk templates need"
4737 3ecf6786 Iustin Pop
                                   " a mirror node")
4738 7baf741d Guido Trotter
      if self.op.snode == pnode.name:
4739 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
4740 3ecf6786 Iustin Pop
                                   " the primary node.")
4741 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, self.op.snode)
4742 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, self.op.snode)
4743 733a2b6a Iustin Pop
      self.secondaries.append(self.op.snode)
4744 a8083063 Iustin Pop
4745 6785674e Iustin Pop
    nodenames = [pnode.name] + self.secondaries
4746 6785674e Iustin Pop
4747 e2fe6369 Iustin Pop
    req_size = _ComputeDiskSize(self.op.disk_template,
4748 08db7c5c Iustin Pop
                                self.disks)
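    # req_size is the total amount of LVM space the new disks need on each
    # node; disk templates that do not allocate from the volume group are
    # expected to yield None, which skips the free-space check below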
4749 ed1ebc60 Guido Trotter
4750 8d75db10 Iustin Pop
    # Check lv size requirements
4751 8d75db10 Iustin Pop
    if req_size is not None:
4752 72737a7f Iustin Pop
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
4753 72737a7f Iustin Pop
                                         self.op.hypervisor)
4754 8d75db10 Iustin Pop
      for node in nodenames:
4755 781de953 Iustin Pop
        info = nodeinfo[node]
4756 4c4e4e1e Iustin Pop
        info.Raise("Cannot get current information from node %s" % node)
4757 070e998b Iustin Pop
        info = info.payload
4758 8d75db10 Iustin Pop
        vg_free = info.get('vg_free', None)
4759 8d75db10 Iustin Pop
        if not isinstance(vg_free, int):
4760 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Can't compute free disk space on"
4761 8d75db10 Iustin Pop
                                     " node %s" % node)
4762 070e998b Iustin Pop
        if req_size > vg_free:
4763 8d75db10 Iustin Pop
          raise errors.OpPrereqError("Not enough disk space on target node %s."
4764 8d75db10 Iustin Pop
                                     " %d MB available, %d MB required" %
4765 070e998b Iustin Pop
                                     (node, vg_free, req_size))
4766 ed1ebc60 Guido Trotter
4767 74409b12 Iustin Pop
    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
4768 6785674e Iustin Pop
4769 a8083063 Iustin Pop
    # os verification
4770 781de953 Iustin Pop
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
4771 4c4e4e1e Iustin Pop
    result.Raise("OS '%s' not in supported os list for primary node %s" %
4772 4c4e4e1e Iustin Pop
                 (self.op.os_type, pnode.name), prereq=True)
4773 a8083063 Iustin Pop
4774 b165e77e Guido Trotter
    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
4775 a8083063 Iustin Pop
4776 49ce1563 Iustin Pop
    # memory check on primary node
4777 49ce1563 Iustin Pop
    if self.op.start:
4778 b9bddb6b Iustin Pop
      _CheckNodeFreeMemory(self, self.pnode.name,
4779 49ce1563 Iustin Pop
                           "creating instance %s" % self.op.instance_name,
4780 338e51e8 Iustin Pop
                           self.be_full[constants.BE_MEMORY],
4781 338e51e8 Iustin Pop
                           self.op.hypervisor)
4782 49ce1563 Iustin Pop
4783 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4784 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
4785 a8083063 Iustin Pop

4786 a8083063 Iustin Pop
    """
4787 a8083063 Iustin Pop
    instance = self.op.instance_name
4788 a8083063 Iustin Pop
    pnode_name = self.pnode.name
4789 a8083063 Iustin Pop
4790 e69d05fd Iustin Pop
    ht_kind = self.op.hypervisor
4791 2a6469d5 Alexander Schreiber
    if ht_kind in constants.HTS_REQ_PORT:
4792 2a6469d5 Alexander Schreiber
      network_port = self.cfg.AllocatePort()
4793 2a6469d5 Alexander Schreiber
    else:
4794 2a6469d5 Alexander Schreiber
      network_port = None
4795 58acb49d Alexander Schreiber
4796 6785674e Iustin Pop
    ##if self.op.vnc_bind_address is None:
4797 6785674e Iustin Pop
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS
4798 31a853d2 Iustin Pop
4799 2c313123 Manuel Franceschini
    # this is needed because os.path.join does not accept None arguments
4800 2c313123 Manuel Franceschini
    if self.op.file_storage_dir is None:
4801 2c313123 Manuel Franceschini
      string_file_storage_dir = ""
4802 2c313123 Manuel Franceschini
    else:
4803 2c313123 Manuel Franceschini
      string_file_storage_dir = self.op.file_storage_dir
4804 2c313123 Manuel Franceschini
4805 0f1a06e3 Manuel Franceschini
    # build the full file storage dir path
4806 0f1a06e3 Manuel Franceschini
    file_storage_dir = os.path.normpath(os.path.join(
4807 d6a02168 Michael Hanselmann
                                        self.cfg.GetFileStorageDir(),
4808 2c313123 Manuel Franceschini
                                        string_file_storage_dir, instance))
4809 0f1a06e3 Manuel Franceschini
4810 0f1a06e3 Manuel Franceschini
4811 b9bddb6b Iustin Pop
    disks = _GenerateDiskTemplate(self,
4812 a8083063 Iustin Pop
                                  self.op.disk_template,
4813 a8083063 Iustin Pop
                                  instance, pnode_name,
4814 08db7c5c Iustin Pop
                                  self.secondaries,
4815 08db7c5c Iustin Pop
                                  self.disks,
4816 0f1a06e3 Manuel Franceschini
                                  file_storage_dir,
4817 e2a65344 Iustin Pop
                                  self.op.file_driver,
4818 e2a65344 Iustin Pop
                                  0)
4819 a8083063 Iustin Pop
4820 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
4821 a8083063 Iustin Pop
                            primary_node=pnode_name,
4822 08db7c5c Iustin Pop
                            nics=self.nics, disks=disks,
4823 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
4824 4978db17 Iustin Pop
                            admin_up=False,
4825 58acb49d Alexander Schreiber
                            network_port=network_port,
4826 338e51e8 Iustin Pop
                            beparams=self.op.beparams,
4827 6785674e Iustin Pop
                            hvparams=self.op.hvparams,
4828 e69d05fd Iustin Pop
                            hypervisor=self.op.hypervisor,
4829 a8083063 Iustin Pop
                            )
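    # the instance starts out with admin_up=False; it is only switched to
    # True further down, after disks and OS have been set up and only if
    # self.op.start was requested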
4830 a8083063 Iustin Pop
4831 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
4832 796cab27 Iustin Pop
    try:
4833 796cab27 Iustin Pop
      _CreateDisks(self, iobj)
4834 796cab27 Iustin Pop
    except errors.OpExecError:
4835 796cab27 Iustin Pop
      self.LogWarning("Device creation failed, reverting...")
4836 796cab27 Iustin Pop
      try:
4837 796cab27 Iustin Pop
        _RemoveDisks(self, iobj)
4838 796cab27 Iustin Pop
      finally:
4839 796cab27 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance)
4840 796cab27 Iustin Pop
        raise
4841 a8083063 Iustin Pop
4842 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
4843 a8083063 Iustin Pop
4844 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
4845 7baf741d Guido Trotter
    # Declare that we don't want to remove the instance lock anymore, as we've
4846 7baf741d Guido Trotter
    # added the instance to the config
4847 7baf741d Guido Trotter
    del self.remove_locks[locking.LEVEL_INSTANCE]
4848 e36e96b4 Guido Trotter
    # Unlock all the nodes; for an import we keep the lock on the source
    # node, as the OS import in Exec() still needs to read from it
4849 9c8971d7 Guido Trotter
    if self.op.mode == constants.INSTANCE_IMPORT:
4850 9c8971d7 Guido Trotter
      nodes_keep = [self.op.src_node]
4851 9c8971d7 Guido Trotter
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
4852 9c8971d7 Guido Trotter
                       if node != self.op.src_node]
4853 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
4854 9c8971d7 Guido Trotter
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
4855 9c8971d7 Guido Trotter
    else:
4856 9c8971d7 Guido Trotter
      self.context.glm.release(locking.LEVEL_NODE)
4857 9c8971d7 Guido Trotter
      del self.acquired_locks[locking.LEVEL_NODE]
4858 a8083063 Iustin Pop
4859 a8083063 Iustin Pop
    if self.op.wait_for_sync:
4860 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj)
4861 a1f445d3 Iustin Pop
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
4862 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
4863 a8083063 Iustin Pop
      time.sleep(15)
4864 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
4865 b9bddb6b Iustin Pop
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
4866 a8083063 Iustin Pop
    else:
4867 a8083063 Iustin Pop
      disk_abort = False
4868 a8083063 Iustin Pop
4869 a8083063 Iustin Pop
    if disk_abort:
4870 b9bddb6b Iustin Pop
      _RemoveDisks(self, iobj)
4871 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
4872 7baf741d Guido Trotter
      # Make sure the instance lock gets removed
4873 7baf741d Guido Trotter
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
4874 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
4875 3ecf6786 Iustin Pop
                               " this instance")
4876 a8083063 Iustin Pop
4877 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
4878 a8083063 Iustin Pop
                (instance, pnode_name))
4879 a8083063 Iustin Pop
4880 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
4881 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
4882 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
4883 e557bae9 Guido Trotter
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
4884 4c4e4e1e Iustin Pop
        result.Raise("Could not add os for instance %s"
4885 4c4e4e1e Iustin Pop
                     " on node %s" % (instance, pnode_name))
4886 a8083063 Iustin Pop
4887 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
4888 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
4889 a8083063 Iustin Pop
        src_node = self.op.src_node
4890 09acf207 Guido Trotter
        src_images = self.src_images
4891 62c9ec92 Iustin Pop
        cluster_name = self.cfg.GetClusterName()
4892 6c0af70e Guido Trotter
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
4893 09acf207 Guido Trotter
                                                         src_node, src_images,
4894 6c0af70e Guido Trotter
                                                         cluster_name)
4895 4c4e4e1e Iustin Pop
        msg = import_result.fail_msg
4896 944bf548 Iustin Pop
        if msg:
4897 944bf548 Iustin Pop
          self.LogWarning("Error while importing the disk images for instance"
4898 944bf548 Iustin Pop
                          " %s on node %s: %s" % (instance, pnode_name, msg))
4899 a8083063 Iustin Pop
      else:
4900 a8083063 Iustin Pop
        # also checked in the prereq part
4901 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
4902 3ecf6786 Iustin Pop
                                     % self.op.mode)
4903 a8083063 Iustin Pop
4904 a8083063 Iustin Pop
    if self.op.start:
4905 4978db17 Iustin Pop
      iobj.admin_up = True
4906 4978db17 Iustin Pop
      self.cfg.Update(iobj)
4907 9a4f63d1 Iustin Pop
      logging.info("Starting instance %s on node %s", instance, pnode_name)
4908 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
4909 0eca8e0c Iustin Pop
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
4910 4c4e4e1e Iustin Pop
      result.Raise("Could not start instance")
4911 a8083063 Iustin Pop
4912 a8083063 Iustin Pop
4913 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
4914 a8083063 Iustin Pop
  """Connect to an instance's console.
4915 a8083063 Iustin Pop

4916 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
4917 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
4918 a8083063 Iustin Pop
  console.
4919 a8083063 Iustin Pop

4920 a8083063 Iustin Pop
  """
4921 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4922 8659b73e Guido Trotter
  REQ_BGL = False
4923 8659b73e Guido Trotter
4924 8659b73e Guido Trotter
  def ExpandNames(self):
4925 8659b73e Guido Trotter
    self._ExpandAndLockInstance()
4926 a8083063 Iustin Pop
4927 a8083063 Iustin Pop
  def CheckPrereq(self):
4928 a8083063 Iustin Pop
    """Check prerequisites.
4929 a8083063 Iustin Pop

4930 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
4931 a8083063 Iustin Pop

4932 a8083063 Iustin Pop
    """
4933 8659b73e Guido Trotter
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
4934 8659b73e Guido Trotter
    assert self.instance is not None, \
4935 8659b73e Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
4936 513e896d Guido Trotter
    _CheckNodeOnline(self, self.instance.primary_node)
4937 a8083063 Iustin Pop
4938 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4939 a8083063 Iustin Pop
    """Connect to the console of an instance
4940 a8083063 Iustin Pop

4941 a8083063 Iustin Pop
    """
4942 a8083063 Iustin Pop
    instance = self.instance
4943 a8083063 Iustin Pop
    node = instance.primary_node
4944 a8083063 Iustin Pop
4945 72737a7f Iustin Pop
    node_insts = self.rpc.call_instance_list([node],
4946 72737a7f Iustin Pop
                                             [instance.hypervisor])[node]
4947 4c4e4e1e Iustin Pop
    node_insts.Raise("Can't get node information from %s" % node)
4948 a8083063 Iustin Pop
4949 aca13712 Iustin Pop
    if instance.name not in node_insts.payload:
4950 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
4951 a8083063 Iustin Pop
4952 9a4f63d1 Iustin Pop
    logging.debug("Connecting to console of %s on %s", instance.name, node)
4953 a8083063 Iustin Pop
4954 e69d05fd Iustin Pop
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
4955 5431b2e4 Guido Trotter
    cluster = self.cfg.GetClusterInfo()
4956 5431b2e4 Guido Trotter
    # beparams and hvparams are passed separately, to avoid editing the
4957 5431b2e4 Guido Trotter
    # instance and then saving the defaults in the instance itself.
4958 5431b2e4 Guido Trotter
    hvparams = cluster.FillHV(instance)
4959 5431b2e4 Guido Trotter
    beparams = cluster.FillBE(instance)
4960 5431b2e4 Guido Trotter
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams, beparams)
4961 b047857b Michael Hanselmann
4962 82122173 Iustin Pop
    # build ssh cmdline
4963 0a80a26f Michael Hanselmann
    return self.ssh.BuildCmd(node, "root", console_cmd, batch=True, tty=True)
4964 a8083063 Iustin Pop
4965 a8083063 Iustin Pop
4966 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
4967 a8083063 Iustin Pop
  """Replace the disks of an instance.
4968 a8083063 Iustin Pop

4969 a8083063 Iustin Pop
  """
4970 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
4971 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4972 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
4973 efd990e4 Guido Trotter
  REQ_BGL = False
4974 efd990e4 Guido Trotter
4975 7e9366f7 Iustin Pop
  def CheckArguments(self):
4976 efd990e4 Guido Trotter
    if not hasattr(self.op, "remote_node"):
4977 efd990e4 Guido Trotter
      self.op.remote_node = None
4978 7e9366f7 Iustin Pop
    if not hasattr(self.op, "iallocator"):
4979 7e9366f7 Iustin Pop
      self.op.iallocator = None
4980 7e9366f7 Iustin Pop
4981 7e9366f7 Iustin Pop
    # check for valid parameter combination
4982 7e9366f7 Iustin Pop
    cnt = [self.op.remote_node, self.op.iallocator].count(None)
4983 7e9366f7 Iustin Pop
    if self.op.mode == constants.REPLACE_DISK_CHG:
4984 7e9366f7 Iustin Pop
      if cnt == 2:
4985 7e9366f7 Iustin Pop
        raise errors.OpPrereqError("When changing the secondary either an"
4986 7e9366f7 Iustin Pop
                                   " iallocator script must be used or the"
4987 7e9366f7 Iustin Pop
                                   " new node given")
4988 7e9366f7 Iustin Pop
      elif cnt == 0:
4989 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Give either the iallocator or the new"
4990 efd990e4 Guido Trotter
                                   " secondary, not both")
4991 7e9366f7 Iustin Pop
    else: # not replacing the secondary
4992 7e9366f7 Iustin Pop
      if cnt != 2:
4993 7e9366f7 Iustin Pop
        raise errors.OpPrereqError("The iallocator and new node options can"
4994 7e9366f7 Iustin Pop
                                   " be used only when changing the"
4995 7e9366f7 Iustin Pop
                                   " secondary node")
4996 7e9366f7 Iustin Pop
4997 7e9366f7 Iustin Pop
  def ExpandNames(self):
4998 7e9366f7 Iustin Pop
    self._ExpandAndLockInstance()
4999 7e9366f7 Iustin Pop
5000 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
5001 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
5002 efd990e4 Guido Trotter
    elif self.op.remote_node is not None:
5003 efd990e4 Guido Trotter
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
5004 efd990e4 Guido Trotter
      if remote_node is None:
5005 efd990e4 Guido Trotter
        raise errors.OpPrereqError("Node '%s' not known" %
5006 efd990e4 Guido Trotter
                                   self.op.remote_node)
5007 efd990e4 Guido Trotter
      self.op.remote_node = remote_node
5008 3b559640 Iustin Pop
      # Warning: do not remove the locking of the new secondary here
5009 3b559640 Iustin Pop
      # unless DRBD8.AddChildren is changed to work in parallel;
5010 3b559640 Iustin Pop
      # currently it doesn't since parallel invocations of
5011 3b559640 Iustin Pop
      # FindUnusedMinor will conflict
5012 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
5013 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
5014 efd990e4 Guido Trotter
    else:
5015 efd990e4 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = []
5016 efd990e4 Guido Trotter
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5017 efd990e4 Guido Trotter
5018 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
5019 efd990e4 Guido Trotter
    # If we're not already locking all nodes in the set we have to declare the
5020 efd990e4 Guido Trotter
    # instance's primary/secondary nodes.
5021 efd990e4 Guido Trotter
    if (level == locking.LEVEL_NODE and
5022 efd990e4 Guido Trotter
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
5023 efd990e4 Guido Trotter
      self._LockInstancesNodes()
5024 a8083063 Iustin Pop
5025 b6e82a65 Iustin Pop
  def _RunAllocator(self):
5026 b6e82a65 Iustin Pop
    """Compute a new secondary node using an IAllocator.
5027 b6e82a65 Iustin Pop

5028 b6e82a65 Iustin Pop
    """
5029 72737a7f Iustin Pop
    ial = IAllocator(self,
5030 b6e82a65 Iustin Pop
                     mode=constants.IALLOCATOR_MODE_RELOC,
5031 b6e82a65 Iustin Pop
                     name=self.op.instance_name,
5032 b6e82a65 Iustin Pop
                     relocate_from=[self.sec_node])
5033 b6e82a65 Iustin Pop
5034 b6e82a65 Iustin Pop
    ial.Run(self.op.iallocator)
5035 b6e82a65 Iustin Pop
5036 b6e82a65 Iustin Pop
    if not ial.success:
5037 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("Can't compute nodes using"
5038 b6e82a65 Iustin Pop
                                 " iallocator '%s': %s" % (self.op.iallocator,
5039 b6e82a65 Iustin Pop
                                                           ial.info))
5040 b6e82a65 Iustin Pop
    if len(ial.nodes) != ial.required_nodes:
5041 b6e82a65 Iustin Pop
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
5042 b6e82a65 Iustin Pop
                                 " of nodes (%s), required %s" %
5043 b6e82a65 Iustin Pop
                                 (len(ial.nodes), ial.required_nodes))
5044 b6e82a65 Iustin Pop
    self.op.remote_node = ial.nodes[0]
5045 86d9d3bb Iustin Pop
    self.LogInfo("Selected new secondary for the instance: %s",
5046 86d9d3bb Iustin Pop
                 self.op.remote_node)
5047 b6e82a65 Iustin Pop
5048 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5049 a8083063 Iustin Pop
    """Build hooks env.
5050 a8083063 Iustin Pop

5051 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
5052 a8083063 Iustin Pop

5053 a8083063 Iustin Pop
    """
5054 a8083063 Iustin Pop
    env = {
5055 a9e0c397 Iustin Pop
      "MODE": self.op.mode,
5056 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
5057 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
5058 a8083063 Iustin Pop
      }
5059 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5060 0834c866 Iustin Pop
    nl = [
5061 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
5062 0834c866 Iustin Pop
      self.instance.primary_node,
5063 0834c866 Iustin Pop
      ]
5064 0834c866 Iustin Pop
    if self.op.remote_node is not None:
5065 0834c866 Iustin Pop
      nl.append(self.op.remote_node)
5066 a8083063 Iustin Pop
    return env, nl, nl
5067 a8083063 Iustin Pop
5068 a8083063 Iustin Pop
  def CheckPrereq(self):
5069 a8083063 Iustin Pop
    """Check prerequisites.
5070 a8083063 Iustin Pop

5071 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
5072 a8083063 Iustin Pop

5073 a8083063 Iustin Pop
    """
5074 efd990e4 Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5075 efd990e4 Guido Trotter
    assert instance is not None, \
5076 efd990e4 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5077 a8083063 Iustin Pop
    self.instance = instance
5078 a8083063 Iustin Pop
5079 7e9366f7 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
5080 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
5081 7e9366f7 Iustin Pop
                                 " instances")
5082 a8083063 Iustin Pop
5083 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
5084 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
5085 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
5086 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
5087 a8083063 Iustin Pop
5088 a9e0c397 Iustin Pop
    self.sec_node = instance.secondary_nodes[0]
5089 a9e0c397 Iustin Pop
5090 7e9366f7 Iustin Pop
    if self.op.iallocator is not None:
5091 de8c7666 Guido Trotter
      self._RunAllocator()
5092 b6e82a65 Iustin Pop
5093 b6e82a65 Iustin Pop
    remote_node = self.op.remote_node
5094 a9e0c397 Iustin Pop
    if remote_node is not None:
5095 a9e0c397 Iustin Pop
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
5096 efd990e4 Guido Trotter
      assert self.remote_node_info is not None, \
5097 efd990e4 Guido Trotter
        "Cannot retrieve locked node %s" % remote_node
5098 a9e0c397 Iustin Pop
    else:
5099 a9e0c397 Iustin Pop
      self.remote_node_info = None
5100 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
5101 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
5102 3ecf6786 Iustin Pop
                                 " the instance.")
5103 a9e0c397 Iustin Pop
    elif remote_node == self.sec_node:
5104 7e9366f7 Iustin Pop
      raise errors.OpPrereqError("The specified node is already the"
5105 7e9366f7 Iustin Pop
                                 " secondary node of the instance.")
5106 7e9366f7 Iustin Pop
5107 7e9366f7 Iustin Pop
    if self.op.mode == constants.REPLACE_DISK_PRI:
5108 7e9366f7 Iustin Pop
      n1 = self.tgt_node = instance.primary_node
5109 7e9366f7 Iustin Pop
      n2 = self.oth_node = self.sec_node
5110 7e9366f7 Iustin Pop
    elif self.op.mode == constants.REPLACE_DISK_SEC:
5111 7e9366f7 Iustin Pop
      n1 = self.tgt_node = self.sec_node
5112 7e9366f7 Iustin Pop
      n2 = self.oth_node = instance.primary_node
5113 7e9366f7 Iustin Pop
    elif self.op.mode == constants.REPLACE_DISK_CHG:
5114 7e9366f7 Iustin Pop
      n1 = self.new_node = remote_node
5115 7e9366f7 Iustin Pop
      n2 = self.oth_node = instance.primary_node
5116 7e9366f7 Iustin Pop
      self.tgt_node = self.sec_node
5117 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, remote_node)
5118 7e9366f7 Iustin Pop
    else:
5119 7e9366f7 Iustin Pop
      raise errors.ProgrammerError("Unhandled disk replace mode")
5120 7e9366f7 Iustin Pop
5121 7e9366f7 Iustin Pop
    _CheckNodeOnline(self, n1)
5122 7e9366f7 Iustin Pop
    _CheckNodeOnline(self, n2)
5123 a9e0c397 Iustin Pop
5124 54155f52 Iustin Pop
    if not self.op.disks:
5125 54155f52 Iustin Pop
      self.op.disks = range(len(instance.disks))
5126 54155f52 Iustin Pop
5127 54155f52 Iustin Pop
    for disk_idx in self.op.disks:
5128 3e0cea06 Iustin Pop
      instance.FindDisk(disk_idx)
5129 a8083063 Iustin Pop
5130 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
5131 a9e0c397 Iustin Pop
    """Replace a disk on the primary or secondary for dbrd8.
5132 a9e0c397 Iustin Pop

5133 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5134 e4376078 Iustin Pop

5135 e4376078 Iustin Pop
      1. for each disk to be replaced:
5136 e4376078 Iustin Pop

5137 e4376078 Iustin Pop
        1. create new LVs on the target node with unique names
5138 e4376078 Iustin Pop
        1. detach old LVs from the drbd device
5139 e4376078 Iustin Pop
        1. rename old LVs to name_replaced.<time_t>
5140 e4376078 Iustin Pop
        1. rename new LVs to old LVs
5141 e4376078 Iustin Pop
        1. attach the new LVs (with the old names now) to the drbd device
5142 e4376078 Iustin Pop

5143 e4376078 Iustin Pop
      1. wait for sync across all devices
5144 e4376078 Iustin Pop

5145 e4376078 Iustin Pop
      1. for each modified disk:
5146 e4376078 Iustin Pop

5147 e4376078 Iustin Pop
        1. remove old LVs (which have the name name_replaced.<time_t>)
5148 a9e0c397 Iustin Pop

5149 a9e0c397 Iustin Pop
    Failures are not very well handled.
5150 cff90b79 Iustin Pop

5151 a9e0c397 Iustin Pop
    """
5152 cff90b79 Iustin Pop
    steps_total = 6
5153 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5154 a9e0c397 Iustin Pop
    instance = self.instance
5155 a9e0c397 Iustin Pop
    iv_names = {}
5156 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
5157 a9e0c397 Iustin Pop
    # start of work
5158 a9e0c397 Iustin Pop
    cfg = self.cfg
5159 a9e0c397 Iustin Pop
    tgt_node = self.tgt_node
5160 cff90b79 Iustin Pop
    oth_node = self.oth_node
5161 cff90b79 Iustin Pop
5162 cff90b79 Iustin Pop
    # Step: check device activation
5163 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5164 cff90b79 Iustin Pop
    info("checking volume groups")
5165 cff90b79 Iustin Pop
    my_vg = cfg.GetVGName()
5166 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([oth_node, tgt_node])
5167 cff90b79 Iustin Pop
    if not results:
5168 cff90b79 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
5169 cff90b79 Iustin Pop
    for node in oth_node, tgt_node:
5170 781de953 Iustin Pop
      res = results[node]
5171 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
5172 e480923b Iustin Pop
      if my_vg not in res.payload:
5173 cff90b79 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5174 cff90b79 Iustin Pop
                                 (my_vg, node))
5175 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5176 54155f52 Iustin Pop
      if idx not in self.op.disks:
5177 cff90b79 Iustin Pop
        continue
5178 cff90b79 Iustin Pop
      for node in tgt_node, oth_node:
5179 54155f52 Iustin Pop
        info("checking disk/%d on %s" % (idx, node))
5180 cff90b79 Iustin Pop
        cfg.SetDiskID(dev, node)
5181 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(node, dev)
5182 4c4e4e1e Iustin Pop
        msg = result.fail_msg
5183 23829f6f Iustin Pop
        if not msg and not result.payload:
5184 23829f6f Iustin Pop
          msg = "disk not found"
5185 23829f6f Iustin Pop
        if msg:
5186 23829f6f Iustin Pop
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5187 23829f6f Iustin Pop
                                   (idx, node, msg))
5188 cff90b79 Iustin Pop
5189 cff90b79 Iustin Pop
    # Step: check other node consistency
5190 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5191 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5192 54155f52 Iustin Pop
      if idx not in self.op.disks:
5193 cff90b79 Iustin Pop
        continue
5194 54155f52 Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, oth_node))
5195 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, oth_node,
5196 cff90b79 Iustin Pop
                                   oth_node==instance.primary_node):
5197 cff90b79 Iustin Pop
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
5198 cff90b79 Iustin Pop
                                 " to replace disks on this node (%s)" %
5199 cff90b79 Iustin Pop
                                 (oth_node, tgt_node))
5200 cff90b79 Iustin Pop
5201 cff90b79 Iustin Pop
    # Step: create new storage
5202 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5203 54155f52 Iustin Pop
    for idx, dev in enumerate(instance.disks):
5204 54155f52 Iustin Pop
      if idx not in self.op.disks:
5205 a9e0c397 Iustin Pop
        continue
5206 a9e0c397 Iustin Pop
      size = dev.size
5207 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, tgt_node)
5208 54155f52 Iustin Pop
      lv_names = [".disk%d_%s" % (idx, suf)
5209 54155f52 Iustin Pop
                  for suf in ["data", "meta"]]
5210 b9bddb6b Iustin Pop
      names = _GenerateUniqueNames(self, lv_names)
5211 a9e0c397 Iustin Pop
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
5212 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[0]))
5213 a9e0c397 Iustin Pop
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
5214 a9e0c397 Iustin Pop
                             logical_id=(vgname, names[1]))
5215 a9e0c397 Iustin Pop
      new_lvs = [lv_data, lv_meta]
5216 a9e0c397 Iustin Pop
      old_lvs = dev.children
5217 a9e0c397 Iustin Pop
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
5218 cff90b79 Iustin Pop
      info("creating new local storage on %s for %s" %
5219 cff90b79 Iustin Pop
           (tgt_node, dev.iv_name))
5220 428958aa Iustin Pop
      # we pass force_create=True to force the LVM creation
5221 a9e0c397 Iustin Pop
      for new_lv in new_lvs:
5222 428958aa Iustin Pop
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
5223 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5224 a9e0c397 Iustin Pop
5225 cff90b79 Iustin Pop
    # Step: for each lv, detach+rename*2+attach
5226 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "change drbd configuration")
5227 cff90b79 Iustin Pop
    for dev, old_lvs, new_lvs in iv_names.itervalues():
5228 cff90b79 Iustin Pop
      info("detaching %s drbd from local storage" % dev.iv_name)
5229 781de953 Iustin Pop
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
5230 4c4e4e1e Iustin Pop
      result.Raise("Can't detach drbd from local storage on node"
5231 4c4e4e1e Iustin Pop
                   " %s for device %s" % (tgt_node, dev.iv_name))
5232 cff90b79 Iustin Pop
      #dev.children = []
5233 cff90b79 Iustin Pop
      #cfg.Update(instance)
5234 a9e0c397 Iustin Pop
5235 a9e0c397 Iustin Pop
      # ok, we created the new LVs, so now we know we have the needed
5236 a9e0c397 Iustin Pop
      # storage; as such, we proceed on the target node to rename
5237 a9e0c397 Iustin Pop
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
5238 c99a3cc0 Manuel Franceschini
      # using the assumption that logical_id == physical_id (which in
5239 a9e0c397 Iustin Pop
      # turn is the unique_id on that node)
5240 cff90b79 Iustin Pop
5241 cff90b79 Iustin Pop
      # FIXME(iustin): use a better name for the replaced LVs
5242 a9e0c397 Iustin Pop
      temp_suffix = int(time.time())
5243 a9e0c397 Iustin Pop
      ren_fn = lambda d, suff: (d.physical_id[0],
5244 a9e0c397 Iustin Pop
                                d.physical_id[1] + "_replaced-%s" % suff)
5245 cff90b79 Iustin Pop
      # build the rename list based on what LVs exist on the node
5246 cff90b79 Iustin Pop
      rlist = []
5247 cff90b79 Iustin Pop
      for to_ren in old_lvs:
5248 23829f6f Iustin Pop
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
5249 4c4e4e1e Iustin Pop
        if not result.fail_msg and result.payload:
5250 23829f6f Iustin Pop
          # device exists
5251 cff90b79 Iustin Pop
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))
5252 cff90b79 Iustin Pop
5253 cff90b79 Iustin Pop
      info("renaming the old LVs on the target node")
5254 781de953 Iustin Pop
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5255 4c4e4e1e Iustin Pop
      result.Raise("Can't rename old LVs on node %s" % tgt_node)
5256 a9e0c397 Iustin Pop
      # now we rename the new LVs to the old LVs
5257 cff90b79 Iustin Pop
      info("renaming the new LVs on the target node")
5258 a9e0c397 Iustin Pop
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
5259 781de953 Iustin Pop
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
5260 4c4e4e1e Iustin Pop
      result.Raise("Can't rename new LVs on node %s" % tgt_node)
5261 cff90b79 Iustin Pop
5262 cff90b79 Iustin Pop
      for old, new in zip(old_lvs, new_lvs):
5263 cff90b79 Iustin Pop
        new.logical_id = old.logical_id
5264 cff90b79 Iustin Pop
        cfg.SetDiskID(new, tgt_node)
5265 a9e0c397 Iustin Pop
5266 cff90b79 Iustin Pop
      for disk in old_lvs:
5267 cff90b79 Iustin Pop
        disk.logical_id = ren_fn(disk, temp_suffix)
5268 cff90b79 Iustin Pop
        cfg.SetDiskID(disk, tgt_node)
5269 a9e0c397 Iustin Pop
5270 a9e0c397 Iustin Pop
      # now that the new lvs have the old name, we can add them to the device
5271 cff90b79 Iustin Pop
      info("adding new mirror component on %s" % tgt_node)
5272 4504c3d6 Iustin Pop
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
5273 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5274 2cc1da8b Iustin Pop
      if msg:
5275 a9e0c397 Iustin Pop
        for new_lv in new_lvs:
5276 4c4e4e1e Iustin Pop
          msg2 = self.rpc.call_blockdev_remove(tgt_node, new_lv).fail_msg
5277 4c4e4e1e Iustin Pop
          if msg2:
5278 4c4e4e1e Iustin Pop
            warning("Can't rollback device %s: %s", dev, msg2,
5279 e1bc0878 Iustin Pop
                    hint="cleanup manually the unused logical volumes")
5280 2cc1da8b Iustin Pop
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
5281 a9e0c397 Iustin Pop
5282 a9e0c397 Iustin Pop
      dev.children = new_lvs
5283 a9e0c397 Iustin Pop
      cfg.Update(instance)
5284 a9e0c397 Iustin Pop
5285 cff90b79 Iustin Pop
    # Step: wait for sync
5286 a9e0c397 Iustin Pop
5287 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5288 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5289 a9e0c397 Iustin Pop
    # return value
5290 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5291 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5292 a9e0c397 Iustin Pop
5293 a9e0c397 Iustin Pop
    # so check manually all the devices
5294 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5295 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
5296 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
5297 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5298 23829f6f Iustin Pop
      if not msg and not result.payload:
5299 23829f6f Iustin Pop
        msg = "disk not found"
5300 23829f6f Iustin Pop
      if msg:
5301 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
5302 23829f6f Iustin Pop
                                 (name, msg))
5303 23829f6f Iustin Pop
      if result.payload[5]:
5304 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
5305 a9e0c397 Iustin Pop
5306 cff90b79 Iustin Pop
    # Step: remove old storage
5307 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5308 a9e0c397 Iustin Pop
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
5309 cff90b79 Iustin Pop
      info("remove logical volumes for %s" % name)
5310 a9e0c397 Iustin Pop
      for lv in old_lvs:
5311 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, tgt_node)
5312 4c4e4e1e Iustin Pop
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).fail_msg
5313 e1bc0878 Iustin Pop
        if msg:
5314 e1bc0878 Iustin Pop
          warning("Can't remove old LV: %s" % msg,
5315 e1bc0878 Iustin Pop
                  hint="manually remove unused LVs")
5316 a9e0c397 Iustin Pop
          continue
5317 a9e0c397 Iustin Pop
5318 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
5319 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
5320 a9e0c397 Iustin Pop

5321 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5322 a9e0c397 Iustin Pop
      - for all disks of the instance:
5323 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
5324 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
5325 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
5326 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
5327 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
5328 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
5329 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
5330 a9e0c397 Iustin Pop
          not network enabled
5331 a9e0c397 Iustin Pop
      - wait for sync across all devices
5332 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
5333 a9e0c397 Iustin Pop

5334 a9e0c397 Iustin Pop
    Failures are not very well handled.
5335 0834c866 Iustin Pop

5336 a9e0c397 Iustin Pop
    """
5337 0834c866 Iustin Pop
    steps_total = 6
5338 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5339 a9e0c397 Iustin Pop
    instance = self.instance
5340 a9e0c397 Iustin Pop
    iv_names = {}
5341 a9e0c397 Iustin Pop
    # start of work
5342 a9e0c397 Iustin Pop
    cfg = self.cfg
5343 a9e0c397 Iustin Pop
    old_node = self.tgt_node
5344 a9e0c397 Iustin Pop
    new_node = self.new_node
5345 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
5346 a2d59d8b Iustin Pop
    nodes_ip = {
5347 a2d59d8b Iustin Pop
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
5348 a2d59d8b Iustin Pop
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
5349 a2d59d8b Iustin Pop
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
5350 a2d59d8b Iustin Pop
      }
5351 0834c866 Iustin Pop
5352 0834c866 Iustin Pop
    # Step: check device activation
5353 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5354 0834c866 Iustin Pop
    info("checking volume groups")
5355 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
5356 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
5357 0834c866 Iustin Pop
    for node in pri_node, new_node:
5358 781de953 Iustin Pop
      res = results[node]
5359 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
5360 e480923b Iustin Pop
      if my_vg not in res.payload:
5361 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5362 0834c866 Iustin Pop
                                 (my_vg, node))
5363 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5364 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5365 0834c866 Iustin Pop
        continue
5366 d418ebfb Iustin Pop
      info("checking disk/%d on %s" % (idx, pri_node))
5367 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5368 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5369 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5370 23829f6f Iustin Pop
      if not msg and not result.payload:
5371 23829f6f Iustin Pop
        msg = "disk not found"
5372 23829f6f Iustin Pop
      if msg:
5373 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5374 23829f6f Iustin Pop
                                 (idx, pri_node, msg))
5375 0834c866 Iustin Pop
5376 0834c866 Iustin Pop
    # Step: check other node consistency
5377 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5378 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5379 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5380 0834c866 Iustin Pop
        continue
5381 d418ebfb Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, pri_node))
5382 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
5383 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
5384 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
5385 0834c866 Iustin Pop
                                 pri_node)
5386 0834c866 Iustin Pop
5387 0834c866 Iustin Pop
    # Step: create new storage
5388 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5389 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5390 d418ebfb Iustin Pop
      info("adding new local storage on %s for disk/%d" %
5391 d418ebfb Iustin Pop
           (new_node, idx))
5392 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
5393 a9e0c397 Iustin Pop
      for new_lv in dev.children:
5394 428958aa Iustin Pop
        _CreateBlockDev(self, new_node, instance, new_lv, True,
5395 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5396 a9e0c397 Iustin Pop
5397 468b46f9 Iustin Pop
    # Step 4: drbd minors and drbd setup changes
5398 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
5399 a1578d63 Iustin Pop
    # error and the success paths
5400 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
5401 a1578d63 Iustin Pop
                                   instance.name)
5402 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
5403 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
5404 d418ebfb Iustin Pop
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
5405 0834c866 Iustin Pop
      size = dev.size
5406 d418ebfb Iustin Pop
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
5407 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
5408 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
5409 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
5410 a2d59d8b Iustin Pop
      # with network, for the later activation in step 4
5411 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5412 a2d59d8b Iustin Pop
      if pri_node == o_node1:
5413 a2d59d8b Iustin Pop
        p_minor = o_minor1
5414 ffa1c0dc Iustin Pop
      else:
5415 a2d59d8b Iustin Pop
        p_minor = o_minor2
5416 a2d59d8b Iustin Pop
5417 a2d59d8b Iustin Pop
      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
5418 a2d59d8b Iustin Pop
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
5419 a2d59d8b Iustin Pop
5420 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
5421 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5422 a2d59d8b Iustin Pop
                    new_net_id)
5423 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5424 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
5425 a9e0c397 Iustin Pop
                              children=dev.children)
5426 796cab27 Iustin Pop
      try:
5427 de12473a Iustin Pop
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
5428 de12473a Iustin Pop
                              _GetInstanceInfoText(instance), False)
5429 82759cb1 Iustin Pop
      except errors.GenericError:
5430 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5431 796cab27 Iustin Pop
        raise
5432 a9e0c397 Iustin Pop
5433 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5434 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
5435 d418ebfb Iustin Pop
      info("shutting down drbd for disk/%d on old node" % idx)
5436 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
5437 4c4e4e1e Iustin Pop
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).fail_msg
5438 cacfd1fd Iustin Pop
      if msg:
5439 cacfd1fd Iustin Pop
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
5440 cacfd1fd Iustin Pop
                (idx, msg),
5441 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
5442 a9e0c397 Iustin Pop
5443 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
5444 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
5445 a2d59d8b Iustin Pop
                                               instance.disks)[pri_node]
5446 642445d9 Iustin Pop
5447 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5448 a2d59d8b Iustin Pop
    if msg:
5449 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
5450 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
5451 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
5452 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
5453 642445d9 Iustin Pop
5454 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
5455 642445d9 Iustin Pop
    # the instance to point to the new secondary
5456 642445d9 Iustin Pop
    info("updating instance configuration")
5457 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
5458 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
5459 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5460 642445d9 Iustin Pop
    cfg.Update(instance)
5461 a9e0c397 Iustin Pop
5462 642445d9 Iustin Pop
    # and now perform the drbd attach
5463 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
5464 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
5465 a2d59d8b Iustin Pop
                                           instance.disks, instance.name,
5466 a2d59d8b Iustin Pop
                                           False)
5467 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
5468 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
5469 a2d59d8b Iustin Pop
      if msg:
5470 a2d59d8b Iustin Pop
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
5471 a2d59d8b Iustin Pop
                hint="please do a gnt-instance info to see the"
5472 a2d59d8b Iustin Pop
                " status of disks")
5473 a9e0c397 Iustin Pop
5474 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5475 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5476 a9e0c397 Iustin Pop
    # return value
5477 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5478 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5479 a9e0c397 Iustin Pop
5480 a9e0c397 Iustin Pop
    # so check manually all the devices
5481 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5482 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5483 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5484 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5485 23829f6f Iustin Pop
      if not msg and not result.payload:
5486 23829f6f Iustin Pop
        msg = "disk not found"
5487 23829f6f Iustin Pop
      if msg:
5488 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
5489 23829f6f Iustin Pop
                                 (idx, msg))
5490 23829f6f Iustin Pop
      if result.payload[5]:
5491 d418ebfb Iustin Pop
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
5492 a9e0c397 Iustin Pop
5493 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5494 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5495 d418ebfb Iustin Pop
      info("remove logical volumes for disk/%d" % idx)
5496 a9e0c397 Iustin Pop
      for lv in old_lvs:
5497 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
5498 4c4e4e1e Iustin Pop
        msg = self.rpc.call_blockdev_remove(old_node, lv).fail_msg
5499 e1bc0878 Iustin Pop
        if msg:
5500 e1bc0878 Iustin Pop
          warning("Can't remove LV on old secondary: %s", msg,
5501 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
5502 a9e0c397 Iustin Pop
5503 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
5504 a9e0c397 Iustin Pop
    """Execute disk replacement.
5505 a9e0c397 Iustin Pop

5506 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
5507 a9e0c397 Iustin Pop

5508 a9e0c397 Iustin Pop
    """
5509 a9e0c397 Iustin Pop
    instance = self.instance
5510 22985314 Guido Trotter
5511 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
5512 0d68c45d Iustin Pop
    if not instance.admin_up:
5513 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, True)
5514 22985314 Guido Trotter
5515 7e9366f7 Iustin Pop
    if self.op.mode == constants.REPLACE_DISK_CHG:
5516 7e9366f7 Iustin Pop
      fn = self._ExecD8Secondary
5517 a9e0c397 Iustin Pop
    else:
5518 7e9366f7 Iustin Pop
      fn = self._ExecD8DiskOnly
5519 22985314 Guido Trotter
5520 22985314 Guido Trotter
    ret = fn(feedback_fn)
5521 22985314 Guido Trotter
5522 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
5523 0d68c45d Iustin Pop
    if not instance.admin_up:
5524 b9bddb6b Iustin Pop
      _SafeShutdownInstanceDisks(self, instance)
5525 22985314 Guido Trotter
5526 22985314 Guido Trotter
    return ret
5527 a9e0c397 Iustin Pop
5528 a8083063 Iustin Pop
5529 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
5530 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
5531 8729e0d7 Iustin Pop

5532 8729e0d7 Iustin Pop
  """
5533 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
5534 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5535 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
5536 31e63dbf Guido Trotter
  REQ_BGL = False
5537 31e63dbf Guido Trotter
5538 31e63dbf Guido Trotter
  def ExpandNames(self):
5539 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
5540 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
5541 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5542 31e63dbf Guido Trotter
5543 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
5544 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
5545 31e63dbf Guido Trotter
      self._LockInstancesNodes()
5546 8729e0d7 Iustin Pop
5547 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
5548 8729e0d7 Iustin Pop
    """Build hooks env.
5549 8729e0d7 Iustin Pop

5550 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
5551 8729e0d7 Iustin Pop

5552 8729e0d7 Iustin Pop
    """
5553 8729e0d7 Iustin Pop
    env = {
5554 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
5555 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
5556 8729e0d7 Iustin Pop
      }
5557 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5558 8729e0d7 Iustin Pop
    nl = [
5559 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
5560 8729e0d7 Iustin Pop
      self.instance.primary_node,
5561 8729e0d7 Iustin Pop
      ]
5562 8729e0d7 Iustin Pop
    return env, nl, nl
5563 8729e0d7 Iustin Pop
5564 8729e0d7 Iustin Pop
  def CheckPrereq(self):
5565 8729e0d7 Iustin Pop
    """Check prerequisites.
5566 8729e0d7 Iustin Pop

5567 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
5568 8729e0d7 Iustin Pop

5569 8729e0d7 Iustin Pop
    """
5570 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5571 31e63dbf Guido Trotter
    assert instance is not None, \
5572 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5573 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
5574 6b12959c Iustin Pop
    for node in nodenames:
5575 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
5576 7527a8a4 Iustin Pop
5577 31e63dbf Guido Trotter
5578 8729e0d7 Iustin Pop
    self.instance = instance
5579 8729e0d7 Iustin Pop
5580 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
5581 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
5582 8729e0d7 Iustin Pop
                                 " growing.")
5583 8729e0d7 Iustin Pop
5584 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
5585 8729e0d7 Iustin Pop
5586 72737a7f Iustin Pop
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5587 72737a7f Iustin Pop
                                       instance.hypervisor)
5588 8729e0d7 Iustin Pop
    for node in nodenames:
5589 781de953 Iustin Pop
      info = nodeinfo[node]
5590 4c4e4e1e Iustin Pop
      info.Raise("Cannot get current information from node %s" % node)
5591 070e998b Iustin Pop
      vg_free = info.payload.get('vg_free', None)
5592 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
5593 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
5594 8729e0d7 Iustin Pop
                                   " node %s" % node)
5595 781de953 Iustin Pop
      if self.op.amount > vg_free:
5596 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
5597 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
5598 781de953 Iustin Pop
                                   (node, vg_free, self.op.amount))
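      # Worked example with illustrative numbers: requesting
      # self.op.amount = 2048 while a node reports vg_free = 1024 fails the
      # check above with "1024 MiB available, 2048 MiB required", so the
      # grow is rejected before any node is modified.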
5599 8729e0d7 Iustin Pop
5600 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
5601 8729e0d7 Iustin Pop
    """Execute disk grow.
5602 8729e0d7 Iustin Pop

5603 8729e0d7 Iustin Pop
    """
5604 8729e0d7 Iustin Pop
    instance = self.instance
5605 ad24e046 Iustin Pop
    disk = self.disk
5606 6b12959c Iustin Pop
    for node in instance.all_nodes:
5607 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
5608 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
5609 4c4e4e1e Iustin Pop
      result.Raise("Grow request failed to node %s" % node)
5610 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
5611 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
5612 6605411d Iustin Pop
    if self.op.wait_for_sync:
5613 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
5614 6605411d Iustin Pop
      if disk_abort:
5615 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
5616 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
5617 8729e0d7 Iustin Pop
5618 8729e0d7 Iustin Pop
5619 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
5620 a8083063 Iustin Pop
  """Query runtime instance data.
5621 a8083063 Iustin Pop

5622 a8083063 Iustin Pop
  """
5623 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
5624 a987fa48 Guido Trotter
  REQ_BGL = False
5625 ae5849b5 Michael Hanselmann
5626 a987fa48 Guido Trotter
  def ExpandNames(self):
5627 a987fa48 Guido Trotter
    self.needed_locks = {}
5628 a987fa48 Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
5629 a987fa48 Guido Trotter
5630 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
5631 a987fa48 Guido Trotter
      raise errors.OpPrereqError("Invalid argument type 'instances'")
5632 a987fa48 Guido Trotter
5633 a987fa48 Guido Trotter
    if self.op.instances:
5634 a987fa48 Guido Trotter
      self.wanted_names = []
5635 a987fa48 Guido Trotter
      for name in self.op.instances:
5636 a987fa48 Guido Trotter
        full_name = self.cfg.ExpandInstanceName(name)
5637 a987fa48 Guido Trotter
        if full_name is None:
5638 f57c76e4 Iustin Pop
          raise errors.OpPrereqError("Instance '%s' not known" % name)
5639 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
5640 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
5641 a987fa48 Guido Trotter
    else:
5642 a987fa48 Guido Trotter
      self.wanted_names = None
5643 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5644 a987fa48 Guido Trotter
5645 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
5646 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5647 a987fa48 Guido Trotter
5648 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
5649 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
5650 a987fa48 Guido Trotter
      self._LockInstancesNodes()
5651 a8083063 Iustin Pop
5652 a8083063 Iustin Pop
  def CheckPrereq(self):
5653 a8083063 Iustin Pop
    """Check prerequisites.
5654 a8083063 Iustin Pop

5655 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
5656 a8083063 Iustin Pop

5657 a8083063 Iustin Pop
    """
5658 a987fa48 Guido Trotter
    if self.wanted_names is None:
5659 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5660 a8083063 Iustin Pop
5661 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
5662 a987fa48 Guido Trotter
                             in self.wanted_names]
5663 a987fa48 Guido Trotter
    return
5664 a8083063 Iustin Pop
5665 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
5666 a8083063 Iustin Pop
    """Compute block device status.
5667 a8083063 Iustin Pop

5668 a8083063 Iustin Pop
    """
5669 57821cac Iustin Pop
    static = self.op.static
5670 57821cac Iustin Pop
    if not static:
5671 57821cac Iustin Pop
      self.cfg.SetDiskID(dev, instance.primary_node)
5672 57821cac Iustin Pop
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
5673 9854f5d0 Iustin Pop
      if dev_pstatus.offline:
5674 9854f5d0 Iustin Pop
        dev_pstatus = None
5675 9854f5d0 Iustin Pop
      else:
5676 4c4e4e1e Iustin Pop
        dev_pstatus.Raise("Can't compute disk status for %s" % instance.name)
5677 9854f5d0 Iustin Pop
        dev_pstatus = dev_pstatus.payload
5678 57821cac Iustin Pop
    else:
5679 57821cac Iustin Pop
      dev_pstatus = None
5680 57821cac Iustin Pop
5681 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
5682 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
5683 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
5684 a8083063 Iustin Pop
        snode = dev.logical_id[1]
5685 a8083063 Iustin Pop
      else:
5686 a8083063 Iustin Pop
        snode = dev.logical_id[0]
5687 a8083063 Iustin Pop
5688 57821cac Iustin Pop
    if snode and not static:
5689 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
5690 72737a7f Iustin Pop
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
5691 9854f5d0 Iustin Pop
      if dev_sstatus.offline:
5692 9854f5d0 Iustin Pop
        dev_sstatus = None
5693 9854f5d0 Iustin Pop
      else:
5694 4c4e4e1e Iustin Pop
        dev_sstatus.Raise("Can't compute disk status for %s" % instance.name)
5695 9854f5d0 Iustin Pop
        dev_sstatus = dev_sstatus.payload
5696 a8083063 Iustin Pop
    else:
5697 a8083063 Iustin Pop
      dev_sstatus = None
5698 a8083063 Iustin Pop
5699 a8083063 Iustin Pop
    if dev.children:
5700 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
5701 a8083063 Iustin Pop
                      for child in dev.children]
5702 a8083063 Iustin Pop
    else:
5703 a8083063 Iustin Pop
      dev_children = []
5704 a8083063 Iustin Pop
5705 a8083063 Iustin Pop
    data = {
5706 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
5707 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
5708 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
5709 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
5710 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
5711 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
5712 a8083063 Iustin Pop
      "children": dev_children,
5713 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
5714 a8083063 Iustin Pop
      }
5715 a8083063 Iustin Pop
5716 a8083063 Iustin Pop
    return data
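    # Illustrative shape of the returned dict for a plain LV disk (values
    # are made up; pstatus/sstatus carry the blockdev_find payloads, or None
    # when static or the device is offline):
    #
    #   {"iv_name": "disk/0", "dev_type": constants.LD_LV,
    #    "logical_id": ("xenvg", "uuid.disk0"), "physical_id": (...),
    #    "pstatus": <payload>, "sstatus": None, "children": [], "mode": "rw"}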
5717 a8083063 Iustin Pop
5718 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5719 a8083063 Iustin Pop
    """Gather and return data"""
5720 a8083063 Iustin Pop
    result = {}
5721 338e51e8 Iustin Pop
5722 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
5723 338e51e8 Iustin Pop
5724 a8083063 Iustin Pop
    for instance in self.wanted_instances:
5725 57821cac Iustin Pop
      if not self.op.static:
5726 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
5727 57821cac Iustin Pop
                                                  instance.name,
5728 57821cac Iustin Pop
                                                  instance.hypervisor)
5729 4c4e4e1e Iustin Pop
        remote_info.Raise("Error checking node %s" % instance.primary_node)
5730 7ad1af4a Iustin Pop
        remote_info = remote_info.payload
5731 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
5732 57821cac Iustin Pop
          remote_state = "up"
5733 57821cac Iustin Pop
        else:
5734 57821cac Iustin Pop
          remote_state = "down"
5735 a8083063 Iustin Pop
      else:
5736 57821cac Iustin Pop
        remote_state = None
5737 0d68c45d Iustin Pop
      if instance.admin_up:
5738 a8083063 Iustin Pop
        config_state = "up"
5739 0d68c45d Iustin Pop
      else:
5740 0d68c45d Iustin Pop
        config_state = "down"
5741 a8083063 Iustin Pop
5742 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
5743 a8083063 Iustin Pop
               for device in instance.disks]
5744 a8083063 Iustin Pop
5745 a8083063 Iustin Pop
      idict = {
5746 a8083063 Iustin Pop
        "name": instance.name,
5747 a8083063 Iustin Pop
        "config_state": config_state,
5748 a8083063 Iustin Pop
        "run_state": remote_state,
5749 a8083063 Iustin Pop
        "pnode": instance.primary_node,
5750 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
5751 a8083063 Iustin Pop
        "os": instance.os,
5752 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
5753 a8083063 Iustin Pop
        "disks": disks,
5754 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
5755 24838135 Iustin Pop
        "network_port": instance.network_port,
5756 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
5757 338e51e8 Iustin Pop
        "hv_actual": cluster.FillHV(instance),
5758 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
5759 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
5760 a8083063 Iustin Pop
        }
5761 a8083063 Iustin Pop
5762 a8083063 Iustin Pop
      result[instance.name] = idict
5763 a8083063 Iustin Pop
5764 a8083063 Iustin Pop
    return result
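  # Example of the overall result shape, one entry per queried instance
  # (values are illustrative):
  #
  #   {"instance1.example.com": {
  #      "name": "instance1.example.com", "config_state": "up",
  #      "run_state": "up", "pnode": "node1.example.com", "snodes": [...],
  #      "os": "debian-etch", "nics": [(mac, ip, bridge), ...],
  #      "disks": [...], "hypervisor": "xen-pvm", ...}}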
5765 a8083063 Iustin Pop
5766 a8083063 Iustin Pop
5767 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
5768 a8083063 Iustin Pop
  """Modifies an instances's parameters.
5769 a8083063 Iustin Pop

5770 a8083063 Iustin Pop
  """
5771 a8083063 Iustin Pop
  HPATH = "instance-modify"
5772 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5773 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
5774 1a5c7281 Guido Trotter
  REQ_BGL = False
5775 1a5c7281 Guido Trotter
5776 24991749 Iustin Pop
  def CheckArguments(self):
5777 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
5778 24991749 Iustin Pop
      self.op.nics = []
5779 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
5780 24991749 Iustin Pop
      self.op.disks = []
5781 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
5782 24991749 Iustin Pop
      self.op.beparams = {}
5783 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
5784 24991749 Iustin Pop
      self.op.hvparams = {}
5785 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
5786 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
5787 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
5788 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
5789 24991749 Iustin Pop
5790 24991749 Iustin Pop
    # Disk validation
5791 24991749 Iustin Pop
    disk_addremove = 0
5792 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5793 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5794 24991749 Iustin Pop
        disk_addremove += 1
5795 24991749 Iustin Pop
        continue
5796 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
5797 24991749 Iustin Pop
        disk_addremove += 1
5798 24991749 Iustin Pop
      else:
5799 24991749 Iustin Pop
        if not isinstance(disk_op, int):
5800 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
5801 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
5802 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
5803 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
5804 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
5805 24991749 Iustin Pop
        size = disk_dict.get('size', None)
5806 24991749 Iustin Pop
        if size is None:
5807 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
5808 24991749 Iustin Pop
        try:
5809 24991749 Iustin Pop
          size = int(size)
5810 24991749 Iustin Pop
        except ValueError, err:
5811 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
5812 24991749 Iustin Pop
                                     str(err))
5813 24991749 Iustin Pop
        disk_dict['size'] = size
5814 24991749 Iustin Pop
      else:
5815 24991749 Iustin Pop
        # modification of disk
5816 24991749 Iustin Pop
        if 'size' in disk_dict:
5817 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
5818 24991749 Iustin Pop
                                     " grow-disk")
5819 24991749 Iustin Pop
5820 24991749 Iustin Pop
    if disk_addremove > 1:
5821 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
5822 24991749 Iustin Pop
                                 " supported at a time")
5823 24991749 Iustin Pop
5824 24991749 Iustin Pop
    # NIC validation
5825 24991749 Iustin Pop
    nic_addremove = 0
5826 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
5827 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
5828 24991749 Iustin Pop
        nic_addremove += 1
5829 24991749 Iustin Pop
        continue
5830 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
5831 24991749 Iustin Pop
        nic_addremove += 1
5832 24991749 Iustin Pop
      else:
5833 24991749 Iustin Pop
        if not isinstance(nic_op, int):
5834 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
5835 24991749 Iustin Pop
5836 24991749 Iustin Pop
      # nic_dict should be a dict
5837 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
5838 24991749 Iustin Pop
      if nic_ip is not None:
5839 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
5840 24991749 Iustin Pop
          nic_dict['ip'] = None
5841 24991749 Iustin Pop
        else:
5842 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
5843 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
5844 5c44da6a Guido Trotter
5845 cd098c41 Guido Trotter
      nic_bridge = nic_dict.get('bridge', None)
5846 cd098c41 Guido Trotter
      nic_link = nic_dict.get('link', None)
5847 cd098c41 Guido Trotter
      if nic_bridge and nic_link:
5848 cd098c41 Guido Trotter
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link' at the same time")
5849 cd098c41 Guido Trotter
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
5850 cd098c41 Guido Trotter
        nic_dict['bridge'] = None
5851 cd098c41 Guido Trotter
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
5852 cd098c41 Guido Trotter
        nic_dict['link'] = None
5853 cd098c41 Guido Trotter
5854 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
5855 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
5856 5c44da6a Guido Trotter
        if nic_mac is None:
5857 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
5858 5c44da6a Guido Trotter
5859 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
5860 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
5861 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
5862 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
5863 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
5864 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
5865 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
5866 5c44da6a Guido Trotter
                                     " modifying an existing nic")
5867 5c44da6a Guido Trotter
5868 24991749 Iustin Pop
    if nic_addremove > 1:
5869 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
5870 24991749 Iustin Pop
                                 " supported at a time")
5871 24991749 Iustin Pop
5872 1a5c7281 Guido Trotter
  def ExpandNames(self):
5873 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
5874 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
5875 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5876 74409b12 Iustin Pop
5877 74409b12 Iustin Pop
  def DeclareLocks(self, level):
5878 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
5879 74409b12 Iustin Pop
      self._LockInstancesNodes()
5880 a8083063 Iustin Pop
5881 a8083063 Iustin Pop
  def BuildHooksEnv(self):
5882 a8083063 Iustin Pop
    """Build hooks env.
5883 a8083063 Iustin Pop

5884 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
5885 a8083063 Iustin Pop

5886 a8083063 Iustin Pop
    """
5887 396e1b78 Michael Hanselmann
    args = dict()
5888 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
5889 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
5890 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
5891 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
5892 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
5893 d8dcf3c9 Guido Trotter
    # information at all.
5894 d8dcf3c9 Guido Trotter
    if self.op.nics:
5895 d8dcf3c9 Guido Trotter
      args['nics'] = []
5896 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
5897 62f0dd02 Guido Trotter
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
5898 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
5899 d8dcf3c9 Guido Trotter
        if idx in nic_override:
5900 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
5901 d8dcf3c9 Guido Trotter
        else:
5902 d8dcf3c9 Guido Trotter
          this_nic_override = {}
5903 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
5904 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
5905 d8dcf3c9 Guido Trotter
        else:
5906 d8dcf3c9 Guido Trotter
          ip = nic.ip
5907 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
5908 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
5909 d8dcf3c9 Guido Trotter
        else:
5910 d8dcf3c9 Guido Trotter
          mac = nic.mac
5911 62f0dd02 Guido Trotter
        if idx in self.nic_pnew:
5912 62f0dd02 Guido Trotter
          nicparams = self.nic_pnew[idx]
5913 62f0dd02 Guido Trotter
        else:
5914 62f0dd02 Guido Trotter
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
5915 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
5916 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
5917 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
5918 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
5919 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
5920 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
5921 62f0dd02 Guido Trotter
        nicparams = self.nic_pnew[constants.DDM_ADD]
5922 62f0dd02 Guido Trotter
        mode = nicparams[constants.NIC_MODE]
5923 62f0dd02 Guido Trotter
        link = nicparams[constants.NIC_LINK]
5924 62f0dd02 Guido Trotter
        args['nics'].append((ip, mac, mode, link))
5925 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
5926 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
5927 d8dcf3c9 Guido Trotter
5928 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
5929 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
5930 a8083063 Iustin Pop
    return env, nl, nl
5931 a8083063 Iustin Pop
5932 0329617a Guido Trotter
  def _GetUpdatedParams(self, old_params, update_dict,
5933 0329617a Guido Trotter
                        default_values, parameter_types):
5934 0329617a Guido Trotter
    """Return the new params dict for the given params.
5935 0329617a Guido Trotter

5936 0329617a Guido Trotter
    @type old_params: dict
5937 0329617a Guido Trotter
    @param old_params: the old parameters
5938 0329617a Guido Trotter
    @type update_dict: dict
5939 0329617a Guido Trotter
    @param update_dict: dict containing new parameter values,
5940 0329617a Guido Trotter
                       or constants.VALUE_DEFAULT to reset the
5941 0329617a Guido Trotter
                       parameter to its default value
5942 0329617a Guido Trotter
    @type default_values: dict
5943 0329617a Guido Trotter
    @param default_values: default values for the filled parameters
5944 0329617a Guido Trotter
    @type parameter_types: dict
5945 0329617a Guido Trotter
    @param parameter_types: dict mapping target dict keys to types
5946 0329617a Guido Trotter
                            in constants.ENFORCEABLE_TYPES
5947 0329617a Guido Trotter
    @rtype: (dict, dict)
5948 0329617a Guido Trotter
    @return: (new_parameters, filled_parameters)
5949 0329617a Guido Trotter

5950 0329617a Guido Trotter
    """
5951 0329617a Guido Trotter
    params_copy = copy.deepcopy(old_params)
5952 0329617a Guido Trotter
    for key, val in update_dict.iteritems():
5953 0329617a Guido Trotter
      if val == constants.VALUE_DEFAULT:
5954 0329617a Guido Trotter
        try:
5955 0329617a Guido Trotter
          del params_copy[key]
5956 0329617a Guido Trotter
        except KeyError:
5957 0329617a Guido Trotter
          pass
5958 0329617a Guido Trotter
      else:
5959 0329617a Guido Trotter
        params_copy[key] = val
5960 0329617a Guido Trotter
    utils.ForceDictType(params_copy, parameter_types)
5961 0329617a Guido Trotter
    params_filled = objects.FillDict(default_values, params_copy)
5962 0329617a Guido Trotter
    return (params_copy, params_filled)
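  # Merge semantics sketch with illustrative parameter names: given
  #   old_params     = {"kernel_path": "/boot/vmlinuz-custom"}
  #   update_dict    = {"kernel_path": constants.VALUE_DEFAULT, "acpi": False}
  #   default_values = {"kernel_path": "/boot/vmlinuz", "acpi": True}
  # the returned params_copy is {"acpi": False} (the reset key is dropped)
  # and params_filled is {"kernel_path": "/boot/vmlinuz", "acpi": False}.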
5963 0329617a Guido Trotter
5964 a8083063 Iustin Pop
  def CheckPrereq(self):
5965 a8083063 Iustin Pop
    """Check prerequisites.
5966 a8083063 Iustin Pop

5967 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
5968 a8083063 Iustin Pop

5969 a8083063 Iustin Pop
    """
5970 24991749 Iustin Pop
    force = self.force = self.op.force
5971 a8083063 Iustin Pop
5972 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
5973 31a853d2 Iustin Pop
5974 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5975 2ee88aeb Guido Trotter
    cluster = self.cluster = self.cfg.GetClusterInfo()
5976 1a5c7281 Guido Trotter
    assert self.instance is not None, \
5977 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5978 6b12959c Iustin Pop
    pnode = instance.primary_node
5979 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
5980 74409b12 Iustin Pop
5981 338e51e8 Iustin Pop
    # hvparams processing
5982 74409b12 Iustin Pop
    if self.op.hvparams:
5983 0329617a Guido Trotter
      i_hvdict, hv_new = self._GetUpdatedParams(
5984 0329617a Guido Trotter
                             instance.hvparams, self.op.hvparams,
5985 0329617a Guido Trotter
                             cluster.hvparams[instance.hypervisor],
5986 0329617a Guido Trotter
                             constants.HVS_PARAMETER_TYPES)
5987 74409b12 Iustin Pop
      # local check
5988 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
5989 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
5990 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
5991 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
5992 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
5993 338e51e8 Iustin Pop
    else:
5994 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
5995 338e51e8 Iustin Pop
5996 338e51e8 Iustin Pop
    # beparams processing
5997 338e51e8 Iustin Pop
    if self.op.beparams:
5998 0329617a Guido Trotter
      i_bedict, be_new = self._GetUpdatedParams(
5999 0329617a Guido Trotter
                             instance.beparams, self.op.beparams,
6000 0329617a Guido Trotter
                             cluster.beparams[constants.PP_DEFAULT],
6001 0329617a Guido Trotter
                             constants.BES_PARAMETER_TYPES)
6002 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
6003 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
6004 338e51e8 Iustin Pop
    else:
6005 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
6006 74409b12 Iustin Pop
6007 cfefe007 Guido Trotter
    self.warn = []
6008 647a5d80 Iustin Pop
6009 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
6010 647a5d80 Iustin Pop
      mem_check_list = [pnode]
6011 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6012 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
6013 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
6014 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
6015 72737a7f Iustin Pop
                                                  instance.hypervisor)
6016 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
6017 72737a7f Iustin Pop
                                         instance.hypervisor)
6018 070e998b Iustin Pop
      pninfo = nodeinfo[pnode]
6019 4c4e4e1e Iustin Pop
      msg = pninfo.fail_msg
6020 070e998b Iustin Pop
      if msg:
6021 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
6022 070e998b Iustin Pop
        self.warn.append("Can't get info from primary node %s: %s" %
6023 070e998b Iustin Pop
                         (pnode, msg))
6024 070e998b Iustin Pop
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
6025 070e998b Iustin Pop
        self.warn.append("Node data from primary node %s doesn't contain"
6026 070e998b Iustin Pop
                         " free memory information" % pnode)
6027 4c4e4e1e Iustin Pop
      elif instance_info.fail_msg:
6028 7ad1af4a Iustin Pop
        self.warn.append("Can't get instance runtime information: %s" %
6029 4c4e4e1e Iustin Pop
                        instance_info.fail_msg)
6030 cfefe007 Guido Trotter
      else:
6031 7ad1af4a Iustin Pop
        if instance_info.payload:
6032 7ad1af4a Iustin Pop
          current_mem = int(instance_info.payload['memory'])
6033 cfefe007 Guido Trotter
        else:
6034 cfefe007 Guido Trotter
          # Assume instance not running
6035 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
6036 cfefe007 Guido Trotter
          # and we have no other way to check)
6037 cfefe007 Guido Trotter
          current_mem = 0
6038 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
6039 070e998b Iustin Pop
                    pninfo.payload['memory_free'])
6040 cfefe007 Guido Trotter
        if miss_mem > 0:
6041 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
6042 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
6043 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
6044 cfefe007 Guido Trotter
6045 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6046 070e998b Iustin Pop
        for node, nres in nodeinfo.items():
6047 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
6048 ea33068f Iustin Pop
            continue
6049 4c4e4e1e Iustin Pop
          msg = nres.fail_msg
6050 070e998b Iustin Pop
          if msg:
6051 070e998b Iustin Pop
            self.warn.append("Can't get info from secondary node %s: %s" %
6052 070e998b Iustin Pop
                             (node, msg))
6053 070e998b Iustin Pop
          elif not isinstance(nres.payload.get('memory_free', None), int):
6054 070e998b Iustin Pop
            self.warn.append("Secondary node %s didn't return free"
6055 070e998b Iustin Pop
                             " memory information" % node)
6056 070e998b Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
6057 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
6058 647a5d80 Iustin Pop
                             " secondary node %s" % node)
6059 5bc84f33 Alexander Schreiber
6060 24991749 Iustin Pop
    # NIC processing
6061 cd098c41 Guido Trotter
    self.nic_pnew = {}
6062 cd098c41 Guido Trotter
    self.nic_pinst = {}
6063 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6064 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6065 24991749 Iustin Pop
        if not instance.nics:
6066 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
6067 24991749 Iustin Pop
        continue
6068 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
6069 24991749 Iustin Pop
        # an existing nic
6070 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
6071 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
6072 24991749 Iustin Pop
                                     " are 0 to %d" %
6073 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
6074 cd098c41 Guido Trotter
        old_nic_params = instance.nics[nic_op].nicparams
6075 cd098c41 Guido Trotter
        old_nic_ip = instance.nics[nic_op].ip
6076 cd098c41 Guido Trotter
      else:
6077 cd098c41 Guido Trotter
        old_nic_params = {}
6078 cd098c41 Guido Trotter
        old_nic_ip = None
6079 cd098c41 Guido Trotter
6080 cd098c41 Guido Trotter
      update_params_dict = dict([(key, nic_dict[key])
6081 cd098c41 Guido Trotter
                                 for key in constants.NICS_PARAMETERS
6082 cd098c41 Guido Trotter
                                 if key in nic_dict])
6083 cd098c41 Guido Trotter
6084 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
6085 cd098c41 Guido Trotter
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']
6086 cd098c41 Guido Trotter
6087 cd098c41 Guido Trotter
      new_nic_params, new_filled_nic_params = \
6088 cd098c41 Guido Trotter
          self._GetUpdatedParams(old_nic_params, update_params_dict,
6089 cd098c41 Guido Trotter
                                 cluster.nicparams[constants.PP_DEFAULT],
6090 cd098c41 Guido Trotter
                                 constants.NICS_PARAMETER_TYPES)
6091 cd098c41 Guido Trotter
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
6092 cd098c41 Guido Trotter
      self.nic_pinst[nic_op] = new_nic_params
6093 cd098c41 Guido Trotter
      self.nic_pnew[nic_op] = new_filled_nic_params
6094 cd098c41 Guido Trotter
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]
6095 cd098c41 Guido Trotter
6096 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_BRIDGED:
6097 cd098c41 Guido Trotter
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
6098 4c4e4e1e Iustin Pop
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
6099 35c0c8da Iustin Pop
        if msg:
6100 35c0c8da Iustin Pop
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
6101 24991749 Iustin Pop
          if self.force:
6102 24991749 Iustin Pop
            self.warn.append(msg)
6103 24991749 Iustin Pop
          else:
6104 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
6105 cd098c41 Guido Trotter
      if new_nic_mode == constants.NIC_MODE_ROUTED:
6106 cd098c41 Guido Trotter
        if 'ip' in nic_dict:
6107 cd098c41 Guido Trotter
          nic_ip = nic_dict['ip']
6108 cd098c41 Guido Trotter
        else:
6109 cd098c41 Guido Trotter
          nic_ip = old_nic_ip
6110 cd098c41 Guido Trotter
        if nic_ip is None:
6111 cd098c41 Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic ip to None'
6112 cd098c41 Guido Trotter
                                     ' on a routed nic')
6113 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6114 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6115 5c44da6a Guido Trotter
        if nic_mac is None:
6116 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic mac to None')
6117 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6118 5c44da6a Guido Trotter
          # otherwise generate the mac
6119 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
6120 5c44da6a Guido Trotter
        else:
6121 5c44da6a Guido Trotter
          # or validate/reserve the current one
6122 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
6123 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
6124 5c44da6a Guido Trotter
                                       " in cluster" % nic_mac)
6125 24991749 Iustin Pop
6126 24991749 Iustin Pop
    # DISK processing
6127 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
6128 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
6129 24991749 Iustin Pop
                                 " diskless instances")
6130 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6131 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6132 24991749 Iustin Pop
        if len(instance.disks) == 1:
6133 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
6134 24991749 Iustin Pop
                                     " an instance")
6135 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
6136 24991749 Iustin Pop
        ins_l = ins_l[pnode]
6137 4c4e4e1e Iustin Pop
        msg = ins_l.fail_msg
6138 aca13712 Iustin Pop
        if msg:
6139 aca13712 Iustin Pop
          raise errors.OpPrereqError("Can't contact node %s: %s" %
6140 aca13712 Iustin Pop
                                     (pnode, msg))
6141 aca13712 Iustin Pop
        if instance.name in ins_l.payload:
6142 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
6143 24991749 Iustin Pop
                                     " disks.")
6144 24991749 Iustin Pop
6145 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
6146 24991749 Iustin Pop
          len(instance.disks) >= constants.MAX_DISKS):
6147 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
6148 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
6149 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
6150 24991749 Iustin Pop
        # an existing disk
6151 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
6152 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
6153 24991749 Iustin Pop
                                     " are 0 to %d" %
6154 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
6155 24991749 Iustin Pop
6156 a8083063 Iustin Pop
    return
6157 a8083063 Iustin Pop
6158 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6159 a8083063 Iustin Pop
    """Modifies an instance.
6160 a8083063 Iustin Pop

6161 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
6162 24991749 Iustin Pop

6163 a8083063 Iustin Pop
    """
6164 cfefe007 Guido Trotter
    # Process the warnings from CheckPrereq here, as we don't have a
6165 cfefe007 Guido Trotter
    # feedback_fn there.
6166 cfefe007 Guido Trotter
    for warn in self.warn:
6167 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
6168 cfefe007 Guido Trotter
6169 a8083063 Iustin Pop
    result = []
6170 a8083063 Iustin Pop
    instance = self.instance
6171 cd098c41 Guido Trotter
    cluster = self.cluster
6172 24991749 Iustin Pop
    # disk changes
6173 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6174 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6175 24991749 Iustin Pop
        # remove the last disk
6176 24991749 Iustin Pop
        device = instance.disks.pop()
6177 24991749 Iustin Pop
        device_idx = len(instance.disks)
6178 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
6179 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
6180 4c4e4e1e Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
6181 e1bc0878 Iustin Pop
          if msg:
6182 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
6183 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
6184 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
6185 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
6186 24991749 Iustin Pop
        # add a new disk
6187 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
6188 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
6189 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
6190 24991749 Iustin Pop
        else:
6191 24991749 Iustin Pop
          file_driver = file_path = None
6192 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
6193 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
6194 24991749 Iustin Pop
                                         instance.disk_template,
6195 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
6196 24991749 Iustin Pop
                                         instance.secondary_nodes,
6197 24991749 Iustin Pop
                                         [disk_dict],
6198 24991749 Iustin Pop
                                         file_path,
6199 24991749 Iustin Pop
                                         file_driver,
6200 24991749 Iustin Pop
                                         disk_idx_base)[0]
6201 24991749 Iustin Pop
        instance.disks.append(new_disk)
6202 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
6203 24991749 Iustin Pop
6204 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
6205 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
6206 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
6207 24991749 Iustin Pop
        #HARDCODE
6208 428958aa Iustin Pop
        for node in instance.all_nodes:
6209 428958aa Iustin Pop
          f_create = node == instance.primary_node
6210 796cab27 Iustin Pop
          try:
6211 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
6212 428958aa Iustin Pop
                            f_create, info, f_create)
6213 1492cca7 Iustin Pop
          except errors.OpExecError, err:
6214 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
6215 428958aa Iustin Pop
                            " node %s: %s",
6216 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
6217 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
6218 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
6219 24991749 Iustin Pop
      else:
6220 24991749 Iustin Pop
        # change a given disk
6221 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
6222 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
6223 24991749 Iustin Pop
    # NIC changes
6224 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6225 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6226 24991749 Iustin Pop
        # remove the last nic
6227 24991749 Iustin Pop
        del instance.nics[-1]
6228 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
6229 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6230 5c44da6a Guido Trotter
        # mac and bridge should be set by now
6231 5c44da6a Guido Trotter
        mac = nic_dict['mac']
6232 cd098c41 Guido Trotter
        ip = nic_dict.get('ip', None)
6233 cd098c41 Guido Trotter
        nicparams = self.nic_pinst[constants.DDM_ADD]
6234 cd098c41 Guido Trotter
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
6235 24991749 Iustin Pop
        instance.nics.append(new_nic)
6236 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
6237 cd098c41 Guido Trotter
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
6238 cd098c41 Guido Trotter
                       (new_nic.mac, new_nic.ip,
6239 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
6240 cd098c41 Guido Trotter
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
6241 cd098c41 Guido Trotter
                       )))
6242 24991749 Iustin Pop
      else:
6243 cd098c41 Guido Trotter
        for key in 'mac', 'ip':
6244 24991749 Iustin Pop
          if key in nic_dict:
6245 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
6246 cd098c41 Guido Trotter
        if nic_op in self.nic_pnew:
6247 cd098c41 Guido Trotter
          instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
6248 cd098c41 Guido Trotter
        for key, val in nic_dict.iteritems():
6249 cd098c41 Guido Trotter
          result.append(("nic.%s/%d" % (key, nic_op), val))
6250 24991749 Iustin Pop
6251 24991749 Iustin Pop
    # hvparams changes
6252 74409b12 Iustin Pop
    if self.op.hvparams:
6253 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
6254 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
6255 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
6256 24991749 Iustin Pop
6257 24991749 Iustin Pop
    # beparams changes
6258 338e51e8 Iustin Pop
    if self.op.beparams:
6259 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
6260 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
6261 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
6262 a8083063 Iustin Pop
6263 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
6264 a8083063 Iustin Pop
6265 a8083063 Iustin Pop
    return result
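  # The returned list pairs each applied change with its new value, e.g.
  # (illustrative):
  #
  #   [("disk/1", "add:size=1024,mode=rw"),
  #    ("nic.ip/0", "192.0.2.10"),
  #    ("be/memory", 2048)]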
6266 a8083063 Iustin Pop
6267 a8083063 Iustin Pop
6268 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
6269 a8083063 Iustin Pop
  """Query the exports list
6270 a8083063 Iustin Pop

6271 a8083063 Iustin Pop
  """
6272 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
6273 21a15682 Guido Trotter
  REQ_BGL = False
6274 21a15682 Guido Trotter
6275 21a15682 Guido Trotter
  def ExpandNames(self):
6276 21a15682 Guido Trotter
    self.needed_locks = {}
6277 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
6278 21a15682 Guido Trotter
    if not self.op.nodes:
6279 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6280 21a15682 Guido Trotter
    else:
6281 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
6282 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
6283 a8083063 Iustin Pop
6284 a8083063 Iustin Pop
  def CheckPrereq(self):
6285 21a15682 Guido Trotter
    """Check prerequisites.
6286 a8083063 Iustin Pop

6287 a8083063 Iustin Pop
    """
6288 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
6289 a8083063 Iustin Pop
6290 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6291 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
6292 a8083063 Iustin Pop

6293 e4376078 Iustin Pop
    @rtype: dict
6294 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
6295 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
6296 e4376078 Iustin Pop
        that node.
6297 a8083063 Iustin Pop

6298 a8083063 Iustin Pop
    """
6299 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
6300 b04285f2 Guido Trotter
    result = {}
6301 b04285f2 Guido Trotter
    for node in rpcresult:
6302 4c4e4e1e Iustin Pop
      if rpcresult[node].fail_msg:
6303 b04285f2 Guido Trotter
        result[node] = False
6304 b04285f2 Guido Trotter
      else:
6305 1b7bfbb7 Iustin Pop
        result[node] = rpcresult[node].payload
6306 b04285f2 Guido Trotter
6307 b04285f2 Guido Trotter
    return result
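  # Example result (illustrative): nodes whose RPC failed map to False, the
  # others to their list of exports:
  #
  #   {"node1.example.com": ["instance1.example.com"],
  #    "node2.example.com": False}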
6308 a8083063 Iustin Pop
6309 a8083063 Iustin Pop
6310 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
6311 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
6312 a8083063 Iustin Pop

6313 a8083063 Iustin Pop
  """
6314 a8083063 Iustin Pop
  HPATH = "instance-export"
6315 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6316 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
6317 6657590e Guido Trotter
  REQ_BGL = False
6318 6657590e Guido Trotter
6319 6657590e Guido Trotter
  def ExpandNames(self):
6320 6657590e Guido Trotter
    self._ExpandAndLockInstance()
6321 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
6322 6657590e Guido Trotter
    #
6323 6657590e Guido Trotter
    # Sad but true, for now we have to lock all nodes, as we don't know where
6324 6657590e Guido Trotter
    # the previous export might be, and in this LU we search for it and
6325 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
6326 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
6327 6657590e Guido Trotter
    #    then one to remove it afterwards
6328 6657590e Guido Trotter
    #  - removing the removal operation altogether
6329 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6330 6657590e Guido Trotter
6331 6657590e Guido Trotter
  def DeclareLocks(self, level):
6332 6657590e Guido Trotter
    """Last minute lock declaration."""
6333 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
6334 a8083063 Iustin Pop
6335 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6336 a8083063 Iustin Pop
    """Build hooks env.
6337 a8083063 Iustin Pop

6338 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
6339 a8083063 Iustin Pop

6340 a8083063 Iustin Pop
    """
6341 a8083063 Iustin Pop
    env = {
6342 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
6343 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
6344 a8083063 Iustin Pop
      }
6345 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6346 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
6347 a8083063 Iustin Pop
          self.op.target_node]
6348 a8083063 Iustin Pop
    return env, nl, nl
6349 a8083063 Iustin Pop
6350 a8083063 Iustin Pop
  def CheckPrereq(self):
6351 a8083063 Iustin Pop
    """Check prerequisites.
6352 a8083063 Iustin Pop

6353 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
6354 a8083063 Iustin Pop

6355 a8083063 Iustin Pop
    """
6356 6657590e Guido Trotter
    instance_name = self.op.instance_name
6357 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
6358 6657590e Guido Trotter
    assert self.instance is not None, \
6359 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
6360 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
6361 a8083063 Iustin Pop
6362 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
6363 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
6364 a8083063 Iustin Pop
6365 268b8e42 Iustin Pop
    if self.dst_node is None:
6366 268b8e42 Iustin Pop
      # This is wrong node name, not a non-locked node
6367 268b8e42 Iustin Pop
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
6368 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
6369 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
6370 a8083063 Iustin Pop
6371 b6023d6c Manuel Franceschini
    # instance disk type verification
6372 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
6373 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
6374 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
6375 b6023d6c Manuel Franceschini
                                   " file-based disks")
6376 b6023d6c Manuel Franceschini
6377 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6378 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
6379 a8083063 Iustin Pop

6380 a8083063 Iustin Pop
    """
6381 a8083063 Iustin Pop
    instance = self.instance
6382 a8083063 Iustin Pop
    dst_node = self.dst_node
6383 a8083063 Iustin Pop
    src_node = instance.primary_node
6384 a8083063 Iustin Pop
    if self.op.shutdown:
6385 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
6386 781de953 Iustin Pop
      result = self.rpc.call_instance_shutdown(src_node, instance)
6387 4c4e4e1e Iustin Pop
      result.Raise("Could not shutdown instance %s on"
6388 4c4e4e1e Iustin Pop
                   " node %s" % (instance.name, src_node))
6389 a8083063 Iustin Pop
6390 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
6391 a8083063 Iustin Pop
6392 a8083063 Iustin Pop
    snap_disks = []
6393 a8083063 Iustin Pop
6394 998c712c Iustin Pop
    # set the disks ID correctly since call_instance_start needs the
6395 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
6396 998c712c Iustin Pop
    for disk in instance.disks:
6397 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
6398 998c712c Iustin Pop
6399 a8083063 Iustin Pop
    try:
6400 a8083063 Iustin Pop
      for disk in instance.disks:
6401 87812fd3 Iustin Pop
        # result.payload will be a snapshot of an lvm leaf of the one we passed
6402 87812fd3 Iustin Pop
        result = self.rpc.call_blockdev_snapshot(src_node, disk)
6403 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6404 87812fd3 Iustin Pop
        if msg:
6405 87812fd3 Iustin Pop
          self.LogWarning("Could not snapshot block device %s on node %s: %s",
6406 87812fd3 Iustin Pop
                          disk.logical_id[1], src_node, msg)
6407 19d7f90a Guido Trotter
          snap_disks.append(False)
6408 19d7f90a Guido Trotter
        else:
6409 87812fd3 Iustin Pop
          disk_id = (vgname, result.payload)
6410 19d7f90a Guido Trotter
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
6411 87812fd3 Iustin Pop
                                 logical_id=disk_id, physical_id=disk_id,
6412 19d7f90a Guido Trotter
                                 iv_name=disk.iv_name)
6413 19d7f90a Guido Trotter
          snap_disks.append(new_dev)
6414 a8083063 Iustin Pop
6415 a8083063 Iustin Pop
    finally:
6416 0d68c45d Iustin Pop
      if self.op.shutdown and instance.admin_up:
6417 0eca8e0c Iustin Pop
        result = self.rpc.call_instance_start(src_node, instance, None, None)
6418 4c4e4e1e Iustin Pop
        msg = result.fail_msg
6419 dd279568 Iustin Pop
        if msg:
6420 b9bddb6b Iustin Pop
          _ShutdownInstanceDisks(self, instance)
6421 dd279568 Iustin Pop
          raise errors.OpExecError("Could not start instance: %s" % msg)
6422 a8083063 Iustin Pop
6423 a8083063 Iustin Pop
    # TODO: check for size
6424 a8083063 Iustin Pop
6425 62c9ec92 Iustin Pop
    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Could not export block device %s from node %s to"
                          " node %s: %s", dev.logical_id[1], src_node,
                          dst_node.name, msg)
        msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
        if msg:
          self.LogWarning("Could not remove snapshot block device %s from node"
                          " %s: %s", dev.logical_id[1], src_node, msg)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Could not finalize export for instance %s"
                      " on node %s: %s", instance.name, dst_node.name, msg)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal; if we
    # proceeded, the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    iname = instance.name
    if nodelist:
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].fail_msg:
          continue
        if iname in exportlist[node].payload:
          msg = self.rpc.call_export_remove(node, iname).fail_msg
          if msg:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s: %s", iname, node, msg)


class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}
    # We need all nodes to be locked in order for RemoveExport to work, but we
    # don't need to lock the instance itself, as nothing will happen to it (and
    # we can remove exports also for a removed instance)
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # If the instance was not found we'll try with the name that was passed in.
    # This will only work if it was an FQDN, though.
    fqdn_warn = False
    if not instance_name:
      fqdn_warn = True
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      msg = exportlist[node].fail_msg
      if msg:
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
        continue
      if instance_name in exportlist[node].payload:
        found = True
        result = self.rpc.call_export_remove(node, instance_name)
        msg = result.fail_msg
        if msg:
          logging.error("Could not remove export for instance %s"
                        " on node %s: %s", instance_name, node, msg)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")


class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
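    # Cluster tags need no locks at all; node and instance tags only lock the
    # single named object, at the corresponding locking level.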
    if self.op.kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_NODE] = name
    elif self.op.kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.needed_locks[locking.LEVEL_INSTANCE] = name

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif self.op.kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif self.op.kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))


class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    return list(self.target.GetTags())


class LUSearchTags(NoHooksLU):
  """Searches the tags for a given pattern.

  """
  _OP_REQP = ["pattern"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the pattern passed for validity by compiling it.

    """
    try:
      self.re = re.compile(self.op.pattern)
    except re.error, err:
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
                                 (self.op.pattern, err))

  def Exec(self, feedback_fn):
    """Returns the matching (path, tag) pairs.

    """
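    # The result is a list of (path, tag) tuples, where path is one of
    # "/cluster", "/instances/<name>" or "/nodes/<name>", e.g.
    # [("/cluster", "prod"), ("/instances/web1.example.com", "prod")]
    # (the example names are purely illustrative).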
    cfg = self.cfg
    tgts = [("/cluster", cfg.GetClusterInfo())]
    ilist = cfg.GetAllInstancesInfo().values()
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
    nlist = cfg.GetAllNodesInfo().values()
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
    results = []
    for path, target in tgts:
      for tag in target.GetTags():
        if self.re.search(tag):
          results.append((path, tag))
    return results


class LUAddTags(TagsLU):
  """Sets one or more tags on a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the type and length of the tag names.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

  def Exec(self, feedback_fn):
    """Sets the tags.

    """
    try:
      for tag in self.op.tags:
        self.target.AddTag(tag)
    except errors.TagError, err:
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tags.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tags from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")


class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
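    # The master-side delay runs locally through utils.TestDelay; the
    # node-side delay is a single RPC fanned out to the requested nodes, and
    # any per-node failure aborts the whole operation via Raise().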
    if self.op.on_master:
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      for node, node_result in result.items():
        node_result.Raise("Failure during rpc call to node %s" % node)


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _ALLO_KEYS or _RELO_KEYS class
      attributes, depending on the mode, are required)
    - four buffer attributes (in|out)_(data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  _RELO_KEYS = [
    "relocate_from",
    ]

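  # Typical usage, sketched after LUTestAllocator.Exec below (names are
  # illustrative only):
  #   ial = IAllocator(self, mode=constants.IALLOCATOR_MODE_RELOC,
  #                    name=instance_name, relocate_from=[old_secondary])
  #   ial.Run(allocator_name)   # runs the external script on the master node
  #   if not ial.success:
  #     ...                     # ial.info carries the script's message
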
  def __init__(self, lu, mode, name, **kwargs):
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
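    # The resulting self.in_data dictionary has roughly this shape (keys as
    # built below; the "request" entry is added later by _AddNewInstance or
    # _AddRelocateInstance):
    #   {"version": ..., "cluster_name": ..., "cluster_tags": [...],
    #    "enabled_hypervisors": [...],
    #    "nodes": {node_name: {static and dynamic node attributes}},
    #    "instances": {instance_name: {instance attributes}}}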
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      if not ninfo.offline:
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
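        # Note: free memory is adjusted so that each primary instance is
        # charged for its full configured BE_MEMORY, even if it is currently
        # using less or is not running; the allocator thus sees the committed
        # amount rather than the momentary usage.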
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = objects.FillDict(
            cluster_info.nicparams[constants.PP_DEFAULT],
            nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

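    # Network-mirrored disk templates (e.g. DRBD) need the allocator to return
    # both a primary and a secondary node; all other templates need one node.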
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _ComputeClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance does not have exactly one"
                                 " secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
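    # call_fn exists as an injection point (e.g. for unit tests); by default
    # the iallocator runner RPC is invoked on the master node with the
    # serialized input text built by _BuildInputData.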
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
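    # The reply must deserialize to a dict containing at least the "success",
    # "info" and "nodes" keys, with "nodes" being a list, e.g. (illustrative
    # values only):
    #   {"success": true, "info": "allocation successful",
    #    "nodes": ["node1.example.com", "node2.example.com"]}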
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict


class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests.

  """
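  # With direction IALLOCATOR_DIR_IN the LU only returns the generated
  # allocator input text; with IALLOCATOR_DIR_OUT it runs the named allocator
  # script and returns its raw output (see Exec below).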
  _OP_REQP = ["direction", "mode", "name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode
    of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      for attr in ["name", "mem_size", "disks", "disk_template",
                   "os", "tags", "nics", "vcpus"]:
        if not hasattr(self.op, attr):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     attr)
      iname = self.cfg.ExpandInstanceName(self.op.name)
      if iname is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   iname)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for row in self.op.nics:
        if (not isinstance(row, dict) or
            "mac" not in row or
            "ip" not in row or
            "bridge" not in row):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for row in self.op.disks:
        if (not isinstance(row, dict) or
            "size" not in row or
            not isinstance(row["size"], int) or
            "mode" not in row or
            row["mode"] not in ['r', 'w']):
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      fname = self.cfg.ExpandInstanceName(self.op.name)
      if fname is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      self.op.name = fname
      self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      result = ial.in_text
    else:
      ial.Run(self.op.allocator, validate=False)
      result = ial.out_text
    return result