Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ f4a2f532

History | View | Annotate | Download (253.2 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import time
29 a8083063 Iustin Pop
import re
30 a8083063 Iustin Pop
import platform
31 ffa1c0dc Iustin Pop
import logging
32 74409b12 Iustin Pop
import copy
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import ssh
35 a8083063 Iustin Pop
from ganeti import utils
36 a8083063 Iustin Pop
from ganeti import errors
37 a8083063 Iustin Pop
from ganeti import hypervisor
38 6048c986 Guido Trotter
from ganeti import locking
39 a8083063 Iustin Pop
from ganeti import constants
40 a8083063 Iustin Pop
from ganeti import objects
41 8d14b30d Iustin Pop
from ganeti import serializer
42 112f18a5 Iustin Pop
from ganeti import ssconf
43 d61df03e Iustin Pop
44 d61df03e Iustin Pop
45 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  @ivar dry_run_result: the value (if any) that will be returned to the caller
      in dry-run mode (signalled by opcode dry_run parameter)

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # default: no lock level is shared (0 == exclusive)
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning
    self.LogInfo = processor.LogInfo
    # support for dry-run
    self.dry_run_result = None

    # validate that all parameters declared in _OP_REQP were supplied
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object (lazily created on first access).

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensure
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separate is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possible
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods can no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    @raise errors.OpPrereqError: if the instance name cannot be expanded

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we're really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    # the request has been fulfilled; a later call must set it again
    del self.recalculate_locks[locking.LEVEL_NODE]
328 c4a2fee1 Guido Trotter
329 a8083063 Iustin Pop
330 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Base class for Logical Units which do not run any hooks.

  Deriving from this class (instead of directly from L{LogicalUnit})
  keeps both HPATH and HTYPE set to None, which disables hook
  execution, and so avoids duplicating that boilerplate in every
  hook-less LU.

  """
  HPATH = None
  HTYPE = None
339 a8083063 Iustin Pop
340 a8083063 Iustin Pop
341 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Validate and expand a list of node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: non-empty list of (short) node names to expand
  @rtype: list
  @return: the expanded node names, sorted via L{utils.NiceSort}
  @raise errors.OpPrereqError: if C{nodes} is not a list, or contains a
      name unknown to the configuration
  @raise errors.ProgrammerError: if called with an empty node list

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  def _Expand(name):
    # ExpandNodeName returns None for names not in the configuration
    full_name = lu.cfg.ExpandNodeName(name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    return full_name

  return utils.NiceSort([_Expand(name) for name in nodes])
368 3312b702 Iustin Pop
369 3312b702 Iustin Pop
370 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
371 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
372 3312b702 Iustin Pop

373 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
374 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
375 e4376078 Iustin Pop
  @type instances: list
376 e4376078 Iustin Pop
  @param instances: list of instance names or None for all instances
377 e4376078 Iustin Pop
  @rtype: list
378 e4376078 Iustin Pop
  @return: the list of instances, sorted
379 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if the instances parameter is wrong type
380 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if any of the passed instances is not found
381 3312b702 Iustin Pop

382 3312b702 Iustin Pop
  """
383 3312b702 Iustin Pop
  if not isinstance(instances, list):
384 3312b702 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'")
385 3312b702 Iustin Pop
386 3312b702 Iustin Pop
  if instances:
387 3312b702 Iustin Pop
    wanted = []
388 3312b702 Iustin Pop
389 3312b702 Iustin Pop
    for name in instances:
390 a7ba5e53 Iustin Pop
      instance = lu.cfg.ExpandInstanceName(name)
391 3312b702 Iustin Pop
      if instance is None:
392 3312b702 Iustin Pop
        raise errors.OpPrereqError("No such instance name '%s'" % name)
393 3312b702 Iustin Pop
      wanted.append(instance)
394 3312b702 Iustin Pop
395 3312b702 Iustin Pop
  else:
396 a7f5dc98 Iustin Pop
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
397 a7f5dc98 Iustin Pop
  return wanted
398 dcb93971 Michael Hanselmann
399 dcb93971 Michael Hanselmann
400 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Verify that all requested output fields are known.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @param selected: the field names requested by the caller
  @raise errors.OpPrereqError: if any selected field matches neither set

  """
  all_fields = utils.FieldSet()
  all_fields.Extend(static)
  all_fields.Extend(dynamic)

  unknown = all_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
417 dcb93971 Michael Hanselmann
418 dcb93971 Michael Hanselmann
419 a5961235 Iustin Pop
def _CheckBooleanOpField(op, name):
420 a5961235 Iustin Pop
  """Validates boolean opcode parameters.
421 a5961235 Iustin Pop

422 a5961235 Iustin Pop
  This will ensure that an opcode parameter is either a boolean value,
423 a5961235 Iustin Pop
  or None (but that it always exists).
424 a5961235 Iustin Pop

425 a5961235 Iustin Pop
  """
426 a5961235 Iustin Pop
  val = getattr(op, name, None)
427 a5961235 Iustin Pop
  if not (val is None or isinstance(val, bool)):
428 a5961235 Iustin Pop
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
429 a5961235 Iustin Pop
                               (name, str(val)))
430 a5961235 Iustin Pop
  setattr(op, name, val)
431 a5961235 Iustin Pop
432 a5961235 Iustin Pop
433 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
434 a5961235 Iustin Pop
  """Ensure that a given node is online.
435 a5961235 Iustin Pop

436 a5961235 Iustin Pop
  @param lu: the LU on behalf of which we make the check
437 a5961235 Iustin Pop
  @param node: the node to check
438 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is offline
439 a5961235 Iustin Pop

440 a5961235 Iustin Pop
  """
441 a5961235 Iustin Pop
  if lu.cfg.GetNodeInfo(node).offline:
442 a5961235 Iustin Pop
    raise errors.OpPrereqError("Can't use offline node %s" % node)
443 a5961235 Iustin Pop
444 a5961235 Iustin Pop
445 733a2b6a Iustin Pop
def _CheckNodeNotDrained(lu, node):
446 733a2b6a Iustin Pop
  """Ensure that a given node is not drained.
447 733a2b6a Iustin Pop

448 733a2b6a Iustin Pop
  @param lu: the LU on behalf of which we make the check
449 733a2b6a Iustin Pop
  @param node: the node to check
450 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is drained
451 733a2b6a Iustin Pop

452 733a2b6a Iustin Pop
  """
453 733a2b6a Iustin Pop
  if lu.cfg.GetNodeInfo(node).drained:
454 733a2b6a Iustin Pop
    raise errors.OpPrereqError("Can't use drained node %s" % node)
455 733a2b6a Iustin Pop
456 733a2b6a Iustin Pop
457 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
458 67fc3042 Iustin Pop
                          memory, vcpus, nics, disk_template, disks,
459 7c4d6c7b Michael Hanselmann
                          bep, hvp, hypervisor_name):
460 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
461 e4376078 Iustin Pop

462 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
463 e4376078 Iustin Pop

464 e4376078 Iustin Pop
  @type name: string
465 e4376078 Iustin Pop
  @param name: the name of the instance
466 e4376078 Iustin Pop
  @type primary_node: string
467 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
468 e4376078 Iustin Pop
  @type secondary_nodes: list
469 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
470 e4376078 Iustin Pop
  @type os_type: string
471 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
472 0d68c45d Iustin Pop
  @type status: boolean
473 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
474 e4376078 Iustin Pop
  @type memory: string
475 e4376078 Iustin Pop
  @param memory: the memory size of the instance
476 e4376078 Iustin Pop
  @type vcpus: string
477 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
478 e4376078 Iustin Pop
  @type nics: list
479 5e3d3eb3 Guido Trotter
  @param nics: list of tuples (ip, mac, mode, link) representing
480 5e3d3eb3 Guido Trotter
      the NICs the instance has
481 2c2690c9 Iustin Pop
  @type disk_template: string
482 5bbd3f7f Michael Hanselmann
  @param disk_template: the disk template of the instance
483 2c2690c9 Iustin Pop
  @type disks: list
484 2c2690c9 Iustin Pop
  @param disks: the list of (size, mode) pairs
485 67fc3042 Iustin Pop
  @type bep: dict
486 67fc3042 Iustin Pop
  @param bep: the backend parameters for the instance
487 67fc3042 Iustin Pop
  @type hvp: dict
488 67fc3042 Iustin Pop
  @param hvp: the hypervisor parameters for the instance
489 7c4d6c7b Michael Hanselmann
  @type hypervisor_name: string
490 7c4d6c7b Michael Hanselmann
  @param hypervisor_name: the hypervisor for the instance
491 e4376078 Iustin Pop
  @rtype: dict
492 e4376078 Iustin Pop
  @return: the hook environment for this instance
493 ecb215b5 Michael Hanselmann

494 396e1b78 Michael Hanselmann
  """
495 0d68c45d Iustin Pop
  if status:
496 0d68c45d Iustin Pop
    str_status = "up"
497 0d68c45d Iustin Pop
  else:
498 0d68c45d Iustin Pop
    str_status = "down"
499 396e1b78 Michael Hanselmann
  env = {
500 0e137c28 Iustin Pop
    "OP_TARGET": name,
501 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
502 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
503 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
504 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
505 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
506 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
507 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
508 2c2690c9 Iustin Pop
    "INSTANCE_DISK_TEMPLATE": disk_template,
509 7c4d6c7b Michael Hanselmann
    "INSTANCE_HYPERVISOR": hypervisor_name,
510 396e1b78 Michael Hanselmann
  }
511 396e1b78 Michael Hanselmann
512 396e1b78 Michael Hanselmann
  if nics:
513 396e1b78 Michael Hanselmann
    nic_count = len(nics)
514 62f0dd02 Guido Trotter
    for idx, (ip, mac, mode, link) in enumerate(nics):
515 396e1b78 Michael Hanselmann
      if ip is None:
516 396e1b78 Michael Hanselmann
        ip = ""
517 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
518 2c2690c9 Iustin Pop
      env["INSTANCE_NIC%d_MAC" % idx] = mac
519 62f0dd02 Guido Trotter
      env["INSTANCE_NIC%d_MODE" % idx] = mode
520 62f0dd02 Guido Trotter
      env["INSTANCE_NIC%d_LINK" % idx] = link
521 62f0dd02 Guido Trotter
      if mode == constants.NIC_MODE_BRIDGED:
522 62f0dd02 Guido Trotter
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
523 396e1b78 Michael Hanselmann
  else:
524 396e1b78 Michael Hanselmann
    nic_count = 0
525 396e1b78 Michael Hanselmann
526 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
527 396e1b78 Michael Hanselmann
528 2c2690c9 Iustin Pop
  if disks:
529 2c2690c9 Iustin Pop
    disk_count = len(disks)
530 2c2690c9 Iustin Pop
    for idx, (size, mode) in enumerate(disks):
531 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_SIZE" % idx] = size
532 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_MODE" % idx] = mode
533 2c2690c9 Iustin Pop
  else:
534 2c2690c9 Iustin Pop
    disk_count = 0
535 2c2690c9 Iustin Pop
536 2c2690c9 Iustin Pop
  env["INSTANCE_DISK_COUNT"] = disk_count
537 2c2690c9 Iustin Pop
538 67fc3042 Iustin Pop
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
539 67fc3042 Iustin Pop
    for key, value in source.items():
540 67fc3042 Iustin Pop
      env["INSTANCE_%s_%s" % (kind, key)] = value
541 67fc3042 Iustin Pop
542 396e1b78 Michael Hanselmann
  return env
543 396e1b78 Michael Hanselmann
544 f9b10246 Guido Trotter
def _NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUQueryInstanceData.

  @type lu:  L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  # the cluster-level defaults, overridden below by each nic's own params
  cluster_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
  result = []
  for nic in nics:
    filled = objects.FillDict(cluster_nicparams, nic.nicparams)
    result.append((nic.ip, nic.mac,
                   filled[constants.NIC_MODE],
                   filled[constants.NIC_LINK]))
  return result
566 396e1b78 Michael Hanselmann
567 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  be_full = cluster.FillBE(instance)
  hv_full = cluster.FillHV(instance)
  # disks are reduced to (size, mode) pairs for the hook environment
  disk_info = []
  for disk in instance.disks:
    disk_info.append((disk.size, disk.mode))
  args = dict(name=instance.name,
              primary_node=instance.primary_node,
              secondary_nodes=instance.secondary_nodes,
              os_type=instance.os,
              status=instance.admin_up,
              memory=be_full[constants.BE_MEMORY],
              vcpus=be_full[constants.BE_VCPUS],
              nics=_NICListToTuple(lu, instance.nics),
              disk_template=instance.disk_template,
              disks=disk_info,
              bep=be_full,
              hvp=hv_full,
              hypervisor=instance.hypervisor)
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
603 396e1b78 Michael Hanselmann
604 396e1b78 Michael Hanselmann
605 ec0292f1 Iustin Pop
def _AdjustCandidatePool(lu):
606 ec0292f1 Iustin Pop
  """Adjust the candidate pool after node operations.
607 ec0292f1 Iustin Pop

608 ec0292f1 Iustin Pop
  """
609 ec0292f1 Iustin Pop
  mod_list = lu.cfg.MaintainCandidatePool()
610 ec0292f1 Iustin Pop
  if mod_list:
611 ec0292f1 Iustin Pop
    lu.LogInfo("Promoted nodes to master candidate role: %s",
612 ee513a66 Iustin Pop
               ", ".join(node.name for node in mod_list))
613 ec0292f1 Iustin Pop
    for name in mod_list:
614 ec0292f1 Iustin Pop
      lu.context.ReaddNode(name)
615 ec0292f1 Iustin Pop
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
616 ec0292f1 Iustin Pop
  if mc_now > mc_max:
617 ec0292f1 Iustin Pop
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
618 ec0292f1 Iustin Pop
               (mc_now, mc_max))
619 ec0292f1 Iustin Pop
620 ec0292f1 Iustin Pop
621 b165e77e Guido Trotter
def _CheckNicsBridgesExist(lu, target_nics, target_node,
                               profile=constants.PP_DEFAULT):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster_params = lu.cfg.GetClusterInfo().nicparams[profile]
  # collect the link (bridge name) of every bridged nic
  bridges = []
  for nic in target_nics:
    nic_params = objects.FillDict(cluster_params, nic.nicparams)
    if nic_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      bridges.append(nic_params[constants.NIC_LINK])
  if bridges:
    result = lu.rpc.call_bridges_exist(target_node, bridges)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True)
635 b165e77e Guido Trotter
636 b165e77e Guido Trotter
637 b165e77e Guido Trotter
def _CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  Defaults to the instance's primary node when no node is given.

  """
  target_node = node
  if target_node is None:
    target_node = instance.primary_node
  _CheckNicsBridgesExist(lu, instance.nics, target_node)
644 bf6929a2 Alexander Schreiber
645 bf6929a2 Alexander Schreiber
646 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodes = self.cfg.GetNodeList()
    # only the master node itself may remain
    if len(nodes) != 1 or nodes[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodes) - 1))
    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    stop_result = self.rpc.call_node_stop_master(master, False)
    stop_result.Raise("Could not disable the master role")
    # keep backups of the ssh keys before the cluster goes away
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)
    return master
682 a8083063 Iustin Pop
683 a8083063 Iustin Pop
684 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
685 a8083063 Iustin Pop
  """Verifies the cluster status.
686 a8083063 Iustin Pop

687 a8083063 Iustin Pop
  """
688 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
689 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
690 e54c4c5e Guido Trotter
  _OP_REQP = ["skip_checks"]
691 d4b9d97f Guido Trotter
  REQ_BGL = False
692 d4b9d97f Guido Trotter
693 d4b9d97f Guido Trotter
  def ExpandNames(self):
    """Acquire (shared) locks on all nodes and instances.

    """
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
699 a8083063 Iustin Pop
700 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map, vg_name):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used DRBD minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
    @rtype: boolean
    @return: True if any problem was found, False otherwise

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version; the remote value must be a
    # (protocol version, release version) 2-element sequence
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    # a protocol mismatch means the rest of the results cannot be
    # interpreted, so we give up on this node entirely
    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version mismatch is only a warning, not an error
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G; skipped entirely when the
    # cluster has no volume group configured (vg_name is None)
    if vg_name is not None:
      vglist = node_result.get(constants.NV_VGLIST, None)
      if not vglist:
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                        (node,))
        bad = True
      else:
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
          bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        # master candidates must hold every file; regular nodes must hold
        # everything except the master-only files
        node_is_mc = nodeinfo.master_candidate
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates (and the file is outdated)" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any
    # NOTE(review): the loops below rebind the local name 'node' (to the
    # remote peer being reported), shadowing the node under verification;
    # harmless today since 'node' is only used inside these loops, but
    # fragile if code using 'node' is added after them

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    # per-hypervisor verification results; None means no problem found
    # NOTE(review): hypervisor failures are reported but do not set 'bad'
    # — confirm whether this is intended
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    if vg_name is not None:
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
      if not isinstance(used_minors, (tuple, list)):
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
                    str(used_minors))
      else:
        # every minor that must exist has to be in use on the node...
        for minor, (iname, must_exist) in drbd_map.items():
          if minor not in used_minors and must_exist:
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
                        " not active" % (minor, iname))
            bad = True
        # ...and every minor in use must be known to the cluster
        for minor in used_minors:
          if minor not in drbd_map:
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
                        minor)
            bad = True

    return bad
847 a8083063 Iustin Pop
848 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False
    node_current = instanceconfig.primary_node

    # volumes this instance requires, grouped by node
    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node, volumes in node_vol_should.items():
      if node in n_offline:
        # ignore missing volumes on offline nodes
        continue
      present = node_vol_is.get(node, [])
      for volume in volumes:
        if volume not in present:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.admin_up:
      running_here = (node_current in node_instance and
                      instance in node_instance[node_current])
      if not running_here and node_current not in n_offline:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    # the instance must not show up on any other node
    for node, running in node_instance.items():
      if node != node_current and instance in running:
        feedback_fn("  - ERROR: instance %s should not run on node %s" %
                        (instance, node))
        bad = True

    return bad
889 a8083063 Iustin Pop
890 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    Any volume present on a node but not expected there is reported.

    """
    bad = False
    for node, volumes in node_vol_is.items():
      expected = node_vol_should.get(node, [])
      for volume in volumes:
        if volume not in expected:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad
906 a8083063 Iustin Pop
907 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node, running in node_instance.items():
      for inst_name in running:
        if inst_name not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (inst_name, node))
          bad = True
    return bad
921 a8083063 Iustin Pop
922 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    @type node_info: dict
    @param node_info: per-node dicts carrying 'mfree' and 'sinst-by-pnode'
    @param instance_cfg: mapping of instance name to instance object
    @param feedback_fn: function used to accumulate error messages
    @rtype: boolean
    @return: True if any node lacks memory for the potential failovers

    """
    bad = False
    # hoisted out of the loops: the cluster info does not change while we
    # iterate (the Exec path caches it the same way)
    cluster_info = self.cfg.GetClusterInfo()

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = cluster_info.FillBE(instance_cfg[instance])
          # only auto-balanced instances count towards N+1 requirements
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
951 2b3b6ddd Guido Trotter
952 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not self.skip_set.issubset(constants.VERIFY_OPTIONAL_CHECKS):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
962 a8083063 Iustin Pop
963 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just ran in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    cluster_tags = " ".join(self.cfg.GetClusterInfo().GetTags())
    env = {"CLUSTER_TAGS": cluster_tags}
    # expose each node's tags as well
    for node in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())
    return env, [], all_nodes
978 d8fff41c Guido Trotter
979 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
980 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
981 a8083063 Iustin Pop

982 a8083063 Iustin Pop
    """
983 a8083063 Iustin Pop
    bad = False
984 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
985 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
986 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
987 a8083063 Iustin Pop
988 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
989 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
990 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
991 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
992 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
993 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
994 6d2e83d5 Iustin Pop
                        for iname in instancelist)
995 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
996 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
997 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
998 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
999 a8083063 Iustin Pop
    node_volume = {}
1000 a8083063 Iustin Pop
    node_instance = {}
1001 9c9c7d30 Guido Trotter
    node_info = {}
1002 26b6af5e Guido Trotter
    instance_cfg = {}
1003 a8083063 Iustin Pop
1004 a8083063 Iustin Pop
    # FIXME: verify OS list
1005 a8083063 Iustin Pop
    # do local checksums
1006 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
1007 112f18a5 Iustin Pop
1008 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
1009 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
1010 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
1011 112f18a5 Iustin Pop
    file_names.extend(master_files)
1012 112f18a5 Iustin Pop
1013 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
1014 a8083063 Iustin Pop
1015 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
1016 a8083063 Iustin Pop
    node_verify_param = {
1017 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
1018 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
1019 82e37788 Iustin Pop
                              if not node.offline],
1020 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
1021 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
1022 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
1023 82e37788 Iustin Pop
                                 if not node.offline],
1024 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
1025 25361b9a Iustin Pop
      constants.NV_VERSION: None,
1026 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
1027 a8083063 Iustin Pop
      }
1028 cc9e1230 Guido Trotter
    if vg_name is not None:
1029 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
1030 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
1031 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
1032 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
1033 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
1034 a8083063 Iustin Pop
1035 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1036 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1037 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1038 6d2e83d5 Iustin Pop
1039 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1040 112f18a5 Iustin Pop
      node = node_i.name
1041 25361b9a Iustin Pop
1042 0a66c968 Iustin Pop
      if node_i.offline:
1043 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
1044 0a66c968 Iustin Pop
        n_offline.append(node)
1045 0a66c968 Iustin Pop
        continue
1046 0a66c968 Iustin Pop
1047 112f18a5 Iustin Pop
      if node == master_node:
1048 25361b9a Iustin Pop
        ntype = "master"
1049 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1050 25361b9a Iustin Pop
        ntype = "master candidate"
1051 22f0f71d Iustin Pop
      elif node_i.drained:
1052 22f0f71d Iustin Pop
        ntype = "drained"
1053 22f0f71d Iustin Pop
        n_drained.append(node)
1054 112f18a5 Iustin Pop
      else:
1055 25361b9a Iustin Pop
        ntype = "regular"
1056 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1057 25361b9a Iustin Pop
1058 4c4e4e1e Iustin Pop
      msg = all_nvinfo[node].fail_msg
1059 6f68a739 Iustin Pop
      if msg:
1060 6f68a739 Iustin Pop
        feedback_fn("  - ERROR: while contacting node %s: %s" % (node, msg))
1061 25361b9a Iustin Pop
        bad = True
1062 25361b9a Iustin Pop
        continue
1063 25361b9a Iustin Pop
1064 6f68a739 Iustin Pop
      nresult = all_nvinfo[node].payload
1065 6d2e83d5 Iustin Pop
      node_drbd = {}
1066 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1067 c614e5fb Iustin Pop
        if instance not in instanceinfo:
1068 c614e5fb Iustin Pop
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
1069 c614e5fb Iustin Pop
                      instance)
1070 c614e5fb Iustin Pop
          # ghost instance should not be running, but otherwise we
1071 c614e5fb Iustin Pop
          # don't give double warnings (both ghost instance and
1072 c614e5fb Iustin Pop
          # unallocated minor in use)
1073 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1074 c614e5fb Iustin Pop
        else:
1075 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1076 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
1077 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
1078 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
1079 cc9e1230 Guido Trotter
                                node_drbd, vg_name)
1080 a8083063 Iustin Pop
      bad = bad or result
1081 a8083063 Iustin Pop
1082 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1083 cc9e1230 Guido Trotter
      if vg_name is None:
1084 cc9e1230 Guido Trotter
        node_volume[node] = {}
1085 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1086 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1087 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
1088 b63ed789 Iustin Pop
        bad = True
1089 b63ed789 Iustin Pop
        node_volume[node] = {}
1090 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1091 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1092 a8083063 Iustin Pop
        bad = True
1093 a8083063 Iustin Pop
        continue
1094 b63ed789 Iustin Pop
      else:
1095 25361b9a Iustin Pop
        node_volume[node] = lvdata
1096 a8083063 Iustin Pop
1097 a8083063 Iustin Pop
      # node_instance
1098 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1099 25361b9a Iustin Pop
      if not isinstance(idata, list):
1100 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1101 25361b9a Iustin Pop
                    (node,))
1102 a8083063 Iustin Pop
        bad = True
1103 a8083063 Iustin Pop
        continue
1104 a8083063 Iustin Pop
1105 25361b9a Iustin Pop
      node_instance[node] = idata
1106 a8083063 Iustin Pop
1107 9c9c7d30 Guido Trotter
      # node_info
1108 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1109 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1110 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1111 9c9c7d30 Guido Trotter
        bad = True
1112 9c9c7d30 Guido Trotter
        continue
1113 9c9c7d30 Guido Trotter
1114 9c9c7d30 Guido Trotter
      try:
1115 9c9c7d30 Guido Trotter
        node_info[node] = {
1116 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1117 93e4c50b Guido Trotter
          "pinst": [],
1118 93e4c50b Guido Trotter
          "sinst": [],
1119 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1120 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1121 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1122 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1123 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1124 36e7da50 Guido Trotter
          # secondary.
1125 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1126 9c9c7d30 Guido Trotter
        }
1127 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1128 cc9e1230 Guido Trotter
        if vg_name is not None:
1129 9a198532 Iustin Pop
          if (constants.NV_VGLIST not in nresult or
1130 9a198532 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST]):
1131 9a198532 Iustin Pop
            feedback_fn("  - ERROR: node %s didn't return data for the"
1132 9a198532 Iustin Pop
                        " volume group '%s' - it is either missing or broken" %
1133 9a198532 Iustin Pop
                        (node, vg_name))
1134 9a198532 Iustin Pop
            bad = True
1135 9a198532 Iustin Pop
            continue
1136 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1137 9a198532 Iustin Pop
      except (ValueError, KeyError):
1138 9a198532 Iustin Pop
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
1139 9a198532 Iustin Pop
                    " from node %s" % (node,))
1140 9c9c7d30 Guido Trotter
        bad = True
1141 9c9c7d30 Guido Trotter
        continue
1142 9c9c7d30 Guido Trotter
1143 a8083063 Iustin Pop
    node_vol_should = {}
1144 a8083063 Iustin Pop
1145 a8083063 Iustin Pop
    for instance in instancelist:
1146 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1147 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1148 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1149 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1150 c5705f58 Guido Trotter
      bad = bad or result
1151 832261fd Iustin Pop
      inst_nodes_offline = []
1152 a8083063 Iustin Pop
1153 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1154 a8083063 Iustin Pop
1155 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1156 26b6af5e Guido Trotter
1157 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1158 93e4c50b Guido Trotter
      if pnode in node_info:
1159 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1160 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1161 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1162 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1163 93e4c50b Guido Trotter
        bad = True
1164 93e4c50b Guido Trotter
1165 832261fd Iustin Pop
      if pnode in n_offline:
1166 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1167 832261fd Iustin Pop
1168 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1169 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1170 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1171 93e4c50b Guido Trotter
      # supported either.
1172 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1173 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1174 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1175 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1176 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1177 93e4c50b Guido Trotter
                    % instance)
1178 93e4c50b Guido Trotter
1179 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1180 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1181 3924700f Iustin Pop
1182 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1183 93e4c50b Guido Trotter
        if snode in node_info:
1184 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1185 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1186 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1187 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1188 0a66c968 Iustin Pop
        elif snode not in n_offline:
1189 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1190 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1191 832261fd Iustin Pop
          bad = True
1192 832261fd Iustin Pop
        if snode in n_offline:
1193 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1194 832261fd Iustin Pop
1195 832261fd Iustin Pop
      if inst_nodes_offline:
1196 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1197 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1198 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1199 832261fd Iustin Pop
        bad = True
1200 93e4c50b Guido Trotter
1201 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1202 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1203 a8083063 Iustin Pop
                                       feedback_fn)
1204 a8083063 Iustin Pop
    bad = bad or result
1205 a8083063 Iustin Pop
1206 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1207 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1208 a8083063 Iustin Pop
                                         feedback_fn)
1209 a8083063 Iustin Pop
    bad = bad or result
1210 a8083063 Iustin Pop
1211 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1212 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1213 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1214 e54c4c5e Guido Trotter
      bad = bad or result
1215 2b3b6ddd Guido Trotter
1216 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1217 2b3b6ddd Guido Trotter
    if i_non_redundant:
1218 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1219 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1220 2b3b6ddd Guido Trotter
1221 3924700f Iustin Pop
    if i_non_a_balanced:
1222 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1223 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1224 3924700f Iustin Pop
1225 0a66c968 Iustin Pop
    if n_offline:
1226 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1227 0a66c968 Iustin Pop
1228 22f0f71d Iustin Pop
    if n_drained:
1229 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1230 22f0f71d Iustin Pop
1231 34290825 Michael Hanselmann
    return not bad
1232 a8083063 Iustin Pop
1233 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Process the results of the post-phase hooks.

    Only the L{constants.HOOKS_PHASE_POST} phase is inspected; for any
    other phase nothing is reported and None is returned. Failing hooks
    are reported via feedback_fn and force a result of 1.

    @param phase: the hooks phase, one of L{constants.HOOKS_PHASE_PRE}
        or L{constants.HOOKS_PHASE_POST}
    @param hooks_results: the (multi-node) results of the hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous result of Exec
    @return: the updated Exec result, combining the previous result with
        the hook results

    """
    # Only POST phase hooks are actually run, so only their results are
    # of interest; for other phases fall through (returning None, as the
    # original flow did when the 'if' was not taken).
    if phase != constants.HOOKS_PHASE_POST:
      return None

    # regexp used to re-indent each line of a hook's output
    reindent = re.compile('^', re.M)
    feedback_fn("* Hooks Results")

    if not hooks_results:
      feedback_fn("  - ERROR: general communication failure")
      return 1

    for node_name, node_res in hooks_results.items():
      header_pending = True
      msg = node_res.fail_msg
      if msg:
        if node_res.offline:
          # offline nodes neither warn nor affect the return value
          continue
        feedback_fn("    Communication failure in hooks execution: %s" %
                    msg)
        lu_result = 1
        continue
      for script, hkr, output in node_res.payload:
        if hkr != constants.HKR_FAIL:
          continue
        # show the node header only once, and only if at least one
        # hook on that node has failed
        if header_pending:
          feedback_fn("  Node %s:" % node_name)
          header_pending = False
        feedback_fn("    ERROR: Script %s failed, output:" % script)
        feedback_fn("%s" % reindent.sub('      ', output))
        lu_result = 1

    return lu_result
1283 d8fff41c Guido Trotter
1284 a8083063 Iustin Pop
1285 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  This LU cross-checks the logical volumes each node reports against
  the volumes the cluster configuration says should exist for the
  (running, network-mirrored) instances.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    """Acquire all node and instance locks, in shared mode."""
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # shared (read-only) locks are enough for a verification run
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    # NB: 'result' aliases the three containers below, so populating
    # them also populates the returned tuple
    result = res_nodes, res_instances, res_missing = {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # build the expected (node, volume) -> instance mapping; only
    # running instances with a network-mirrored disk template count
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      # no volumes expected anywhere: return the (empty) result early
      return result

    node_lvs = self.rpc.call_lv_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      node_res = node_lvs[node]
      if node_res.offline:
        # offline nodes are silently skipped
        continue
      msg = node_res.fail_msg
      if msg:
        # record per-node RPC errors, but keep checking the other nodes
        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
        res_nodes[node] = msg
        continue

      lvs = node_res.payload
      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
        # pop found volumes out of nv_dict, so that at the end of the
        # loop only the missing ones remain in it
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
1366 2c95a8d4 Iustin Pop
1367 2c95a8d4 Iustin Pop
1368 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    Hooks run only on the master node, with the old name as target and
    the new name in the environment.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    The new name must resolve, must actually differ (in name or IP)
    from the current cluster name, and if the IP changes it must not
    already be reachable on the network.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    # self.ip is consumed later by Exec
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # a live host answering on the new IP would conflict with the
      # cluster's master IP
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    # store the resolved (canonical) name back into the opcode
    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    The master IP is stopped, the configuration updated and the known
    hosts file redistributed; the master role is re-enabled again in a
    finally clause, even if the update fails.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      try:
        # the master already has the updated file locally
        node_list.remove(master)
      except ValueError:
        pass
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        msg = to_result.fail_msg
        if msg:
          # per-node copy failures are warnings only, not fatal
          msg = ("Copy of file %s to node %s failed: %s" %
                 (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
          self.proc.LogWarning(msg)

    finally:
      # always try to restart the master role, whatever happened above
      result = self.rpc.call_node_start_master(master, False, False)
      msg = result.fail_msg
      if msg:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually: %s", msg)
1448 07bd8a51 Iustin Pop
1449 07bd8a51 Iustin Pop
1450 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Tell whether a disk, or any disk in its subtree, is LVM-backed.

  The children (if any) are checked recursively before the disk itself.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: True if an LD_LV dev_type is found anywhere, False otherwise

  """
  children = disk.children or []
  if any(_RecursiveCheckIfLVMBased(child) for child in children):
    return True
  return disk.dev_type == constants.LD_LV
1464 8084f9f6 Manuel Franceschini
1465 8084f9f6 Manuel Franceschini
1466 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    Normalizes the optional candidate_pool_size argument: missing
    becomes None, anything else must be a positive integer.

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except (ValueError, TypeError), err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    """Acquire (shared) locks on all nodes."""
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    Hooks run only on the master node.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    Side effects: stores the validated new parameter sets on self
    (new_beparams, new_nicparams, new_hvparams, hv_list, cluster) for
    later use by Exec.

    """
    # vg_name passed but empty/false means "disable lvm storage", which
    # is only allowed if no lvm-based instances exist
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        msg = vglist[node].fail_msg
        if msg:
          # ignoring down node
          self.LogWarning("Error while gathering data on node %s"
                          " (ignoring node): %s", node, msg)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].payload,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate params changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      # merge the new values over the current cluster defaults
      self.new_beparams = objects.FillDict(
        cluster.beparams[constants.PP_DEFAULT], self.op.beparams)

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = objects.FillDict(
        cluster.nicparams[constants.PP_DEFAULT], self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)

    # hypervisor list/parameters
    self.new_hvparams = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    Applies the parameter sets validated in CheckPrereq to the cluster
    object and saves the configuration.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      # normalize the empty string ("disable lvm") to None
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self)

    self.cfg.Update(self.cluster)
1612 8084f9f6 Manuel Franceschini
1613 8084f9f6 Manuel Franceschini
1614 28eddce5 Guido Trotter
def _RedistributeAncillaryFiles(lu, additional_nodes=None):
  """Copy cluster-wide ancillary files to all nodes.

  ConfigWriter takes care of distributing the config and ssconf files,
  but a few more files must be present on every node; this helper
  pushes those.

  @param lu: calling logical unit
  @param additional_nodes: list of nodes not in the config to distribute to

  """
  # 1. Target nodes: all configured nodes plus any extras, minus ourselves
  master = lu.cfg.GetNodeInfo(lu.cfg.GetMasterNode())
  target_nodes = lu.cfg.GetNodeList()
  if additional_nodes is not None:
    target_nodes.extend(additional_nodes)
  if master.name in target_nodes:
    target_nodes.remove(master.name)

  # 2. Files to push: static ancillary files plus per-hypervisor extras
  files_to_copy = set([constants.ETC_HOSTS,
                       constants.SSH_KNOWN_HOSTS_FILE,
                       constants.RAPI_CERT_FILE,
                       constants.RAPI_USERS_FILE,
                      ])
  for hv_name in lu.cfg.GetClusterInfo().enabled_hypervisors:
    hv_class = hypervisor.GetHypervisor(hv_name)
    files_to_copy.update(hv_class.GetAncillaryFiles())

  # 3. Upload each file that exists locally, warning on per-node failures
  for fname in files_to_copy:
    if not os.path.exists(fname):
      continue
    result = lu.rpc.call_upload_file(target_nodes, fname)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, to_node, msg))
        lu.proc.LogWarning(msg)
1654 28eddce5 Guido Trotter
1655 28eddce5 Guido Trotter
1656 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
  """Force a full push of the cluster configuration.

  This LU has no prerequisites; it simply rewrites the config (which
  triggers its distribution) and then pushes the ancillary files.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # all nodes are touched, but only in shared mode
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """Check prerequisites.

    There is nothing to verify for this operation.

    """

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    # updating the cluster object re-saves and redistributes the config
    self.cfg.Update(self.cfg.GetClusterInfo())
    _RedistributeAncillaryFiles(self)
1682 afee0879 Iustin Pop
1683 afee0879 Iustin Pop
1684 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  @param lu: the logical unit on whose behalf we operate (provides
      config, RPC and logging access)
  @param instance: the instance whose disks are polled
  @param oneshot: if True, poll only once and report instead of waiting
      for full synchronization
  @param unlock: not used inside this function — TODO(review): confirm
      whether any caller still depends on passing it
  @return: True if the disks ended up not cumulatively degraded

  """
  if not instance.disks:
    # no disks, nothing to wait for
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  # count of consecutive RPC failures; we abort after 10 of them
  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    # the RPC worked, so reset the failure counter
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a percentage means this disk is still syncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1752 a8083063 Iustin Pop
1753 a8083063 Iustin Pop
1754 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  @param lu: the logical unit on whose behalf we operate
  @param dev: the disk object to check; children are checked recursively
  @param node: the node on which to check the disk
  @param on_primary: whether the disk is expected assembled on its
      primary node
  @param ldisk: selects which status field of the remote result to test
  @return: True if the device (and all its children) is consistent

  """
  lu.cfg.SetDiskID(dev, node)
  # index into the call_blockdev_find result payload: 5 is the overall
  # is_degraded flag, 6 is the local-disk status (see docstring above)
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      result = result and (not rstats.payload[idx])
  if dev.children:
    for child in dev.children:
      # NOTE: children are always checked with the default ldisk=False
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result
1785 a8083063 Iustin Pop
1786 a8083063 Iustin Pop
1787 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  # no purely static fields; all output fields come from the node RPC
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    """Validate the output fields; currently acquires no locks.

    """
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and tuples of (path, status, diagnose) as values, eg::

          {"debian-etch": {"node1": [(/usr/lib/..., True, ""),
                                     (/srv/..., False, "invalid api")],
                           "node2": [(/srv/..., True, "")]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].fail_msg]
    for node_name, nr in rlist.items():
      # skip nodes whose RPC failed or that reported no OSes at all
      if nr.fail_msg or not nr.payload:
        continue
      for name, path, status, diagnose in nr.payload:
        if name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[name] = {}
          for nname in good_nodes:
            all_os[name][nname] = []
        all_os[name][node_name].append((path, status, diagnose))
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    # only online nodes are queried for their OS list
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.items():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # an OS is valid only if its first variant is OK on every node
          val = utils.all([osl and osl[0][1] for osl in os_data.values()])
        elif field == "node_status":
          # this is just a copy of the dict
          val = {}
          for node_name, nos_list in os_data.items():
            val[node_name] = nos_list
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1878 a8083063 Iustin Pop
1879 a8083063 Iustin Pop
1880 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # the node being removed is excluded from both hook phases
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # FIX: was the obsolete comma-form "raise errors.OpPrereqError, (...)";
      # use the call form, consistent with every other raise in this module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    # store the canonical (expanded) name for Exec and the hooks
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    result = self.rpc.call_node_leave_cluster(node.name)
    msg = result.fail_msg
    if msg:
      # best-effort: the node may already be unreachable, so only warn
      self.LogWarning("Errors encountered on the remote node while leaving"
                      " the cluster: %s", msg)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)
1951 eb1742d5 Guido Trotter
1952 a8083063 Iustin Pop
1953 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields that require a live RPC to the nodes
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  # fields answerable from the configuration alone
  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    "role",
    )

  def ExpandNames(self):
    """Validate fields and compute the node list and required locks.

    """
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      nodenames = self.wanted
      # without locking, nodes may have disappeared since ExpandNames
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.fail_msg and nodeinfo.payload:
          nodeinfo = nodeinfo.payload
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      # NOTE: all keys share one empty dict instance; safe because the
      # values are only read (via .get) below, never mutated
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    # only walk the instance list if an instance-related field was asked for
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif field == "drained":
          val = node.drained
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        elif field == "role":
          # single-letter role: Master, Candidate, Drained, Offline, Regular
          if node.name == master_node:
            val = "M"
          elif node.master_candidate:
            val = "C"
          elif node.drained:
            val = "D"
          elif node.offline:
            val = "O"
          else:
            val = "R"
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
2121 a8083063 Iustin Pop
2122 a8083063 Iustin Pop
2123 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    # no explicit node list means "all nodes"
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    node_names = self.nodes
    volumes = self.rpc.call_node_volumes(node_names)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    # map each instance to its per-node LV dict, for the "instance" field
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in node_names:
      nresult = volumes[node]
      if nresult.offline:
        continue
      msg = nresult.fail_msg
      if msg:
        self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
        continue

      vol_list = sorted(nresult.payload, key=lambda vol: vol['dev'])

      for vol in vol_list:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the owning instance, if any; '-' when unowned
            val = '-'
            for inst in ilist:
              inst_lvs = lv_by_node[inst]
              if node in inst_lvs and vol['name'] in inst_lvs[node]:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
2207 dcb93971 Michael Hanselmann
2208 dcb93971 Michael Hanselmann
2209 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # single-homed node: secondary address defaults to the primary one
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        # a re-add must keep the exact same addresses as before
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      # reject any IP overlap with other, different nodes
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # decide whether the new node can become a master candidate
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    if self.op.readd:
      # don't count the node itself against the pool when re-adding
      exceptions = [node]
    else:
      exceptions = []
    mc_now, mc_max = self.cfg.GetMasterCandidateStats(exceptions)
    # the new node will increase mc_max with one, so:
    mc_max = min(mc_max + 1, cp_size)
    self.master_candidate = mc_now < mc_max

    if self.op.readd:
      self.new_node = self.cfg.GetNodeInfo(node)
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
    else:
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise("Can't get version information from node %s" % node)
    if constants.PROTOCOL_VERSION == result.payload:
      logging.info("Communication to node %s fine, sw version %s match",
                   node, result.payload)
    else:
      raise errors.OpExecError("Version mismatch master version %s,"
                               " node version %s" %
                               (constants.PROTOCOL_VERSION, result.payload))

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])
    result.Raise("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
                   prereq=True)
      if not result.payload:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      result[verifier].Raise("Cannot communicate with node %s" % verifier)
      nl_payload = result[verifier].payload['nodelist']
      if nl_payload:
        for failed in nl_payload:
          feedback_fn("ssh/hostname verification failed %s -> %s" %
                      (verifier, nl_payload[failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    if self.op.readd:
      _RedistributeAncillaryFiles(self)
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        # use the fail_msg accessor for consistency with the rest of the
        # file (RemoteFailMsg is the old spelling of the same check)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      _RedistributeAncillaryFiles(self, additional_nodes=[node])
      self.context.AddNode(new_node)
2426 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check the arguments: valid node name and a consistent flag set."""
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    if all_mods.count(None) == 3:
      # nothing at all was requested
      raise errors.OpPrereqError("Please pass at least one modification")
    if all_mods.count(True) > 1:
      # offline/drained/master_candidate are mutually-exclusive "on" states
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time")

  def ExpandNames(self):
    """Only the target node needs to be locked."""
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if ((self.op.master_candidate == False or self.op.offline == True or
         self.op.drained == True) and node.master_candidate):
      # we will demote the node from master_candidate
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master node has to be a"
                                   " master candidate, online and not drained")
      # demotion may leave the candidate pool below the configured size;
      # this is only a warning if the operation is forced
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    # an offline/drained node can't be promoted unless the same opcode
    # also clears that flag
    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    @return: list of (parameter, new value) pairs describing the
        changes that were applied

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        # going offline implies losing candidate and drained status
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        # tell the node itself to drop its master-candidate files
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.fail_msg
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        # draining implies losing candidate and offline status
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
          rrc = self.rpc.call_node_demote_from_mc(node.name)
          # use fail_msg, consistent with the demotion path above
          # (was RemoteFailMsg(), the old spelling of the same check)
          msg = rrc.fail_msg
          if msg:
            self.LogWarning("Node failed to demote itself: %s" % msg)
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result
2556 b31c8676 Iustin Pop
2557 f5118ade Iustin Pop
class LUPowercycleNode(NoHooksLU):
  """Powercycles a node.

  """
  _OP_REQP = ["node_name", "force"]
  REQ_BGL = False

  def CheckArguments(self):
    """Check that the node exists; powercycling the master needs 'force'."""
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    if node_name == self.cfg.GetMasterNode() and not self.op.force:
      raise errors.OpPrereqError("The node is the master and the force"
                                 " parameter was not set")

  def ExpandNames(self):
    """Locking for PowercycleNode.

    This is a last-resource option and shouldn't block on other
    jobs. Therefore, we grab no locks.

    """
    self.needed_locks = {}

  def CheckPrereq(self):
    """Check prerequisites.

    This LU has no prereqs.

    """
    pass

  def Exec(self, feedback_fn):
    """Reboots a node.

    """
    # ask the node itself (via RPC) to powercycle, using the cluster's
    # hypervisor; raises OpExecError if the call fails
    result = self.rpc.call_node_powercycle(self.op.node_name,
                                           self.cfg.GetHypervisorType())
    result.Raise("Failed to schedule the reboot")
    return result.payload
2600 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # read-only query of the in-memory config: no locks needed
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    @return: a dict with the software/protocol versions and the main
        cluster parameters (name, master, hypervisor settings, etc.)

    """
    cluster = self.cfg.GetClusterInfo()
    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "architecture": (platform.architecture()[0], platform.machine()),
      "name": cluster.cluster_name,
      "master": cluster.master_node,
      "default_hypervisor": cluster.default_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      # FIX: index hvparams with the loop variable; the previous code used
      # "hypervisor", which is the module imported at the top of the file,
      # and would fail at runtime with a KeyError
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "beparams": cluster.beparams,
      "nicparams": cluster.nicparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "master_netdev": cluster.master_netdev,
      "volume_group_name": cluster.volume_group_name,
      "file_storage_dir": cluster.file_storage_dir,
      }

    return result
2645 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  """
  _OP_REQP = []
  REQ_BGL = False
  # no dynamic (runtime-computed) fields; all values come from the config
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    """Validate the requested output fields; no locks are needed."""
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Return the values of the requested config fields, in request order.

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        # the job queue drain flag is signaled by a file's existence
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values
2685 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Lock the instance; node locks are computed later in DeclareLocks."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """At the node level, lock the nodes of the (already locked) instance."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    # activating disks requires a reachable primary node
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    # _AssembleInstanceDisks returns (success, device mapping info)
    disks_ok, disks_info = _AssembleInstanceDisks(self, self.instance)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
2723 b9bddb6b Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @return: a tuple of (disks_ok, device_info); disks_ok is False if any
      non-ignored assemble call failed, and device_info is a list of
      (host, instance_visible_name, payload) tuples with the mapping
      from node devices to instance devices (payload is the primary
      node's assemble result)

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        # secondary-node failures only count when the caller cares
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.fail_msg
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
    # NOTE(review): 'result' here is the value left over from the last
    # inner-loop iteration, i.e. the primary node's assemble call (the
    # loop skips every other node); this relies on the primary node
    # always being present in the disk's node tree
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        result.payload))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2790 a8083063 Iustin Pop
2791 a8083063 Iustin Pop
2792 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  Assembles the instance's disks; on failure the already-assembled
  devices are torn down again and an OpExecError is raised (with a
  hint about '--force' when force was explicitly False).

  """
  all_ok, _ = _AssembleInstanceDisks(lu, instance, ignore_secondaries=force)
  if all_ok:
    return
  # assembly failed: clean up whatever came up before aborting
  _ShutdownInstanceDisks(lu, instance)
  if force is not None and not force:
    lu.proc.LogWarning("", hint="If the message above refers to a"
                       " secondary node,"
                       " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
2805 fe7b0351 Michael Hanselmann
2806 fe7b0351 Michael Hanselmann
2807 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and prepare node lock recalculation."""
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the instance's nodes once the instance lock is held."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    _SafeShutdownInstanceDisks(self, self.instance)
2839 a8083063 Iustin Pop
2840 a8083063 Iustin Pop
2841 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  """
  primary = instance.primary_node
  rpc_result = lu.rpc.call_instance_list([primary], [instance.hypervisor])
  running = rpc_result[primary]
  running.Raise("Can't contact node %s" % primary)

  # refuse to touch the disks while the instance is live
  if instance.name in running.payload:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2857 a8083063 Iustin Pop
2858 a8083063 Iustin Pop
2859 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we shut down
  @type ignore_primary: boolean
  @param ignore_primary: if true, errors on the primary node are
      ignored when computing the result (errors on other nodes always
      make the function return False)
  @return: True if all shutdown calls succeeded (ignoring primary-node
      failures when ignore_primary is set), False otherwise

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        # the failure counts unless it happened on the primary node and
        # the caller asked for primary-node errors to be ignored
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result
2880 a8083063 Iustin Pop
2881 a8083063 Iustin Pop
2882 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  Queries the node for its free memory and raises OpPrereqError when
  the node cannot be queried, reports an unusable value, or has less
  free memory than requested.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  info = nodeinfo[node]
  info.Raise("Can't get data from node %s" % node, prereq=True)
  free_mem = info.payload.get('memory_free', None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))
2914 d4f16fd9 Iustin Pop
2915 d4f16fd9 Iustin Pop
2916 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and acquire the instance lock."""
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, validates any
    one-shot beparams/hvparams overrides passed in the opcode, and
    verifies node liveness, bridges and (if not already running) free
    memory on the primary node.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra beparams (optional one-shot overrides on the opcode)
    self.beparams = getattr(self.op, "beparams", {})
    if self.beparams:
      if not isinstance(self.beparams, dict):
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                                   " dict" % (type(self.beparams), ))
      # fill the beparams dict
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
      self.op.beparams = self.beparams

    # extra hvparams (optional one-shot overrides on the opcode)
    self.hvparams = getattr(self.op, "hvparams", {})
    if self.hvparams:
      if not isinstance(self.hvparams, dict):
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
                                   " dict" % (type(self.hvparams), ))

      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
      # validate the fully-merged view: cluster defaults, then instance
      # params, then the per-start overrides
      filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
                                    instance.hvparams)
      filled_hvp.update(self.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
      self.op.hvparams = self.hvparams

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if not remote_info.payload: # not running already
      # only require free memory when we will actually start the instance
      _CheckNodeFreeMemory(self, instance.primary_node,
                           "starting instance %s" % instance.name,
                           bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force

    # record the desired state in the config before attempting the start
    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance,
                                          self.hvparams, self.beparams)
    msg = result.fail_msg
    if msg:
      # start failed: tear the disks down again before reporting
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)
3014 a8083063 Iustin Pop
3015 a8083063 Iustin Pop
3016 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    """Validate the reboot type and acquire the instance lock."""
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft/hard reboots are delegated to the node via a single reboot
    RPC; a full reboot is implemented as shutdown + disk cycle + start.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type)
      result.Raise("Could not reboot instance")
    else:
      # full reboot: stop the instance, cycle its disks, start it again
      result = self.rpc.call_instance_shutdown(node_current, instance)
      result.Raise("Could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.fail_msg
      if msg:
        # start failed: tear the disks down again before reporting
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)
3094 bf6929a2 Alexander Schreiber
3095 bf6929a2 Alexander Schreiber
3096 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and acquire the instance lock."""
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    nl.extend(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    inst = self.instance
    # mark the new state in the config first, then do the shutdown;
    # a failed shutdown RPC is only warned about, and the disks are
    # torn down regardless
    self.cfg.MarkInstanceDown(inst.name)
    result = self.rpc.call_instance_shutdown(inst.primary_node, inst)
    shutdown_msg = result.fail_msg
    if shutdown_msg:
      self.proc.LogWarning("Could not shutdown instance: %s" % shutdown_msg)

    _ShutdownInstanceDisks(self, inst)
3142 a8083063 Iustin Pop
3143 a8083063 Iustin Pop
3144 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand the instance name and acquire the instance lock."""
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the (optional) target OS is supported on the primary node.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: this previously referenced self.op.pnode, which does not
        # exist on the reinstall opcode (copy-paste from instance
        # creation) and would have raised AttributeError instead
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise("OS '%s' not in supported OS list for primary node %s" %
                   (self.op.os_type, pnode.name), prereq=True)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    Optionally switches the instance OS, then brings up the disks and
    runs the OS create scripts; the disks are shut down again in all
    cases.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (inst.name, inst.primary_node))
    finally:
      # always release the disks, even if the OS install failed
      _ShutdownInstanceDisks(self, inst)
3226 fe7b0351 Michael Hanselmann
3227 fe7b0351 Michael Hanselmann
3228 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    # the instance must be marked down in the configuration ...
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # ... and must not actually be running on its primary node either
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise("Error checking node %s" % instance.primary_node,
                      prereq=True)
    if remote_info.payload:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    # unless explicitly ignored, refuse a new name whose IP already answers
    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    # for file-based disks, remember the old storage directory before the
    # configuration rename changes the disks' logical ids
    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    # run the OS rename script with the disks activated; a script failure is
    # only a warning here, since the configuration rename already happened
    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
3329 decd5f45 Iustin Pop
3330 decd5f45 Iustin Pop
3331 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are filled in later, once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    inst = self.instance
    logging.info("Shutting down instance %s on node %s",
                 inst.name, inst.primary_node)

    # shut the instance down first; failures are fatal unless the opcode
    # asked for them to be ignored
    shutdown_result = self.rpc.call_instance_shutdown(inst.primary_node, inst)
    shutdown_msg = shutdown_result.fail_msg
    if shutdown_msg:
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (inst.name, inst.primary_node, shutdown_msg))
      feedback_fn("Warning: can't shutdown instance: %s" % shutdown_msg)

    logging.info("Removing block devices for instance %s", inst.name)

    if not _RemoveDisks(self, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", inst.name)

    # drop the instance from the configuration and release its lock
    self.cfg.RemoveInstance(inst.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = inst.name
3399 a8083063 Iustin Pop
3400 a8083063 Iustin Pop
3401 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields answerable from the configuration alone; dynamic fields below
  # additionally require RPC calls to the nodes
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "nic_mode", "nic_link",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|mode|link)/([0-9]+)",
                                    r"(nic)\.(bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|modes|links|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")


  def ExpandNames(self):
    """Validate the requested fields and compute the needed locks."""
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # node queries (and thus locking) are only needed when at least one
    # non-static field was selected and the caller asked for locking
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    # node locks are only acquired when do_locking was computed above
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      # query all primary nodes for the live state of their instances
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          off_nodes.append(name)
        if result.failed or result.fail_msg:
          bad_nodes.append(name)
        else:
          if result.payload:
            live_data.update(result.payload)
          # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    cluster = self.cfg.GetClusterInfo()
    for instance in instance_list:
      iout = []
      i_hv = cluster.FillHV(instance)
      i_be = cluster.FillBE(instance)
      # per-NIC parameters, with the cluster defaults filled in
      i_nicp = [objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
                                 nic.nicparams) for nic in instance.nics]
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          # None means the live state could not be determined
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin/operational status string
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "vcpus":
          val = i_be[constants.BE_VCPUS]
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          if instance.nics:
            val = instance.nics[0].ip
          else:
            val = None
        elif field == "nic_mode":
          if instance.nics:
            val = i_nicp[0][constants.NIC_MODE]
          else:
            val = None
        elif field == "nic_link":
          if instance.nics:
            val = i_nicp[0][constants.NIC_LINK]
          else:
            val = None
        elif field == "bridge":
          # only meaningful for bridged NICs; None otherwise
          if (instance.nics and
              i_nicp[0][constants.NIC_MODE] == constants.NIC_MODE_BRIDGED):
            val = i_nicp[0][constants.NIC_LINK]
          else:
            val = None
        elif field == "mac":
          if instance.nics:
            val = instance.nics[0].mac
          else:
            val = None
        elif field == "sda_size" or field == "sdb_size":
          # legacy fields: map sda/sdb onto disk indices 0/1
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "modes":
              val = [nicp[constants.NIC_MODE] for nicp in i_nicp]
            elif st_groups[1] == "links":
              val = [nicp[constants.NIC_LINK] for nicp in i_nicp]
            elif st_groups[1] == "bridges":
              val = []
              for nicp in i_nicp:
                if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
                  val.append(nicp[constants.NIC_LINK])
                else:
                  val.append(None)
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "mode":
                  val = i_nicp[nic_idx][constants.NIC_MODE]
                elif st_groups[1] == "link":
                  val = i_nicp[nic_idx][constants.NIC_LINK]
                elif st_groups[1] == "bridge":
                  nic_mode = i_nicp[nic_idx][constants.NIC_MODE]
                  if nic_mode == constants.NIC_MODE_BRIDGED:
                    val = i_nicp[nic_idx][constants.NIC_LINK]
                  else:
                    val = None
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, ("Declared but unhandled variable parameter '%s'" %
                           field)
        else:
          assert False, "Declared but unhandled parameter '%s'" % field
        iout.append(val)
      output.append(iout)

    return output
3682 a8083063 Iustin Pop
3683 a8083063 Iustin Pop
3684 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  The instance is shut down on its primary node and started on its
  (single) secondary node; only instances using a network-mirrored
  disk template can be failed over.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  # parameters that must be present in the opcode
  _OP_REQP = ["instance_name", "ignore_consistency"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks cannot be computed yet; they are recalculated once the
    # instance lock is held and the instance's nodes are known
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # lock exactly the primary and secondary nodes of the instance
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    # hooks run on the master node plus the instance's secondary nodes
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    # ExpandNames already expanded and locked the instance name, so the
    # config lookup must succeed; a miss is an internal error
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      # a mirrored disk template without secondaries is an internal error
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)
    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # a degraded disk aborts the failover only for a running
        # instance, unless the user asked to ignore consistency
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        # best-effort mode: warn and proceed, assuming the source node
        # is really down
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        # start failed: deactivate the just-assembled disks before aborting
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
3814 a8083063 Iustin Pop
3815 a8083063 Iustin Pop
3816 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
3817 53c776b5 Iustin Pop
  """Migrate an instance.
3818 53c776b5 Iustin Pop

3819 53c776b5 Iustin Pop
  This is migration without shutting down, compared to the failover,
3820 53c776b5 Iustin Pop
  which is done with shutdown.
3821 53c776b5 Iustin Pop

3822 53c776b5 Iustin Pop
  """
3823 53c776b5 Iustin Pop
  HPATH = "instance-migrate"
3824 53c776b5 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3825 53c776b5 Iustin Pop
  _OP_REQP = ["instance_name", "live", "cleanup"]
3826 53c776b5 Iustin Pop
3827 53c776b5 Iustin Pop
  REQ_BGL = False
3828 53c776b5 Iustin Pop
3829 53c776b5 Iustin Pop
  def ExpandNames(self):
3830 53c776b5 Iustin Pop
    self._ExpandAndLockInstance()
3831 53c776b5 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
3832 53c776b5 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
3833 53c776b5 Iustin Pop
3834 53c776b5 Iustin Pop
  def DeclareLocks(self, level):
3835 53c776b5 Iustin Pop
    if level == locking.LEVEL_NODE:
3836 53c776b5 Iustin Pop
      self._LockInstancesNodes()
3837 53c776b5 Iustin Pop
3838 53c776b5 Iustin Pop
  def BuildHooksEnv(self):
3839 53c776b5 Iustin Pop
    """Build hooks env.
3840 53c776b5 Iustin Pop

3841 53c776b5 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
3842 53c776b5 Iustin Pop

3843 53c776b5 Iustin Pop
    """
3844 53c776b5 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance)
3845 2c2690c9 Iustin Pop
    env["MIGRATE_LIVE"] = self.op.live
3846 2c2690c9 Iustin Pop
    env["MIGRATE_CLEANUP"] = self.op.cleanup
3847 53c776b5 Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
3848 53c776b5 Iustin Pop
    return env, nl, nl
3849 53c776b5 Iustin Pop
3850 53c776b5 Iustin Pop
  def CheckPrereq(self):
3851 53c776b5 Iustin Pop
    """Check prerequisites.
3852 53c776b5 Iustin Pop

3853 53c776b5 Iustin Pop
    This checks that the instance is in the cluster.
3854 53c776b5 Iustin Pop

3855 53c776b5 Iustin Pop
    """
3856 53c776b5 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3857 53c776b5 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3858 53c776b5 Iustin Pop
    if instance is None:
3859 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3860 53c776b5 Iustin Pop
                                 self.op.instance_name)
3861 53c776b5 Iustin Pop
3862 53c776b5 Iustin Pop
    if instance.disk_template != constants.DT_DRBD8:
3863 53c776b5 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3864 53c776b5 Iustin Pop
                                 " drbd8, cannot migrate.")
3865 53c776b5 Iustin Pop
3866 53c776b5 Iustin Pop
    secondary_nodes = instance.secondary_nodes
3867 53c776b5 Iustin Pop
    if not secondary_nodes:
3868 733a2b6a Iustin Pop
      raise errors.ConfigurationError("No secondary node but using"
3869 733a2b6a Iustin Pop
                                      " drbd8 disk template")
3870 53c776b5 Iustin Pop
3871 53c776b5 Iustin Pop
    i_be = self.cfg.GetClusterInfo().FillBE(instance)
3872 53c776b5 Iustin Pop
3873 53c776b5 Iustin Pop
    target_node = secondary_nodes[0]
3874 53c776b5 Iustin Pop
    # check memory requirements on the secondary node
3875 53c776b5 Iustin Pop
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
3876 53c776b5 Iustin Pop
                         instance.name, i_be[constants.BE_MEMORY],
3877 53c776b5 Iustin Pop
                         instance.hypervisor)
3878 53c776b5 Iustin Pop
3879 53c776b5 Iustin Pop
    # check bridge existance
3880 b165e77e Guido Trotter
    _CheckInstanceBridgesExist(self, instance, node=target_node)
3881 53c776b5 Iustin Pop
3882 53c776b5 Iustin Pop
    if not self.op.cleanup:
3883 733a2b6a Iustin Pop
      _CheckNodeNotDrained(self, target_node)
3884 53c776b5 Iustin Pop
      result = self.rpc.call_instance_migratable(instance.primary_node,
3885 53c776b5 Iustin Pop
                                                 instance)
3886 4c4e4e1e Iustin Pop
      result.Raise("Can't migrate, please use failover", prereq=True)
3887 53c776b5 Iustin Pop
3888 53c776b5 Iustin Pop
    self.instance = instance
3889 53c776b5 Iustin Pop
3890 53c776b5 Iustin Pop
  def _WaitUntilSync(self):
3891 53c776b5 Iustin Pop
    """Poll with custom rpc for disk sync.
3892 53c776b5 Iustin Pop

3893 53c776b5 Iustin Pop
    This uses our own step-based rpc call.
3894 53c776b5 Iustin Pop

3895 53c776b5 Iustin Pop
    """
3896 53c776b5 Iustin Pop
    self.feedback_fn("* wait until resync is done")
3897 53c776b5 Iustin Pop
    all_done = False
3898 53c776b5 Iustin Pop
    while not all_done:
3899 53c776b5 Iustin Pop
      all_done = True
3900 53c776b5 Iustin Pop
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
3901 53c776b5 Iustin Pop
                                            self.nodes_ip,
3902 53c776b5 Iustin Pop
                                            self.instance.disks)
3903 53c776b5 Iustin Pop
      min_percent = 100
3904 53c776b5 Iustin Pop
      for node, nres in result.items():
3905 4c4e4e1e Iustin Pop
        nres.Raise("Cannot resync disks on node %s" % node)
3906 0959c824 Iustin Pop
        node_done, node_percent = nres.payload
3907 53c776b5 Iustin Pop
        all_done = all_done and node_done
3908 53c776b5 Iustin Pop
        if node_percent is not None:
3909 53c776b5 Iustin Pop
          min_percent = min(min_percent, node_percent)
3910 53c776b5 Iustin Pop
      if not all_done:
3911 53c776b5 Iustin Pop
        if min_percent < 100:
3912 53c776b5 Iustin Pop
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
3913 53c776b5 Iustin Pop
        time.sleep(2)
3914 53c776b5 Iustin Pop
3915 53c776b5 Iustin Pop
  def _EnsureSecondary(self, node):
3916 53c776b5 Iustin Pop
    """Demote a node to secondary.
3917 53c776b5 Iustin Pop

3918 53c776b5 Iustin Pop
    """
3919 53c776b5 Iustin Pop
    self.feedback_fn("* switching node %s to secondary mode" % node)
3920 53c776b5 Iustin Pop
3921 53c776b5 Iustin Pop
    for dev in self.instance.disks:
3922 53c776b5 Iustin Pop
      self.cfg.SetDiskID(dev, node)
3923 53c776b5 Iustin Pop
3924 53c776b5 Iustin Pop
    result = self.rpc.call_blockdev_close(node, self.instance.name,
3925 53c776b5 Iustin Pop
                                          self.instance.disks)
3926 4c4e4e1e Iustin Pop
    result.Raise("Cannot change disk to secondary on node %s" % node)
3927 53c776b5 Iustin Pop
3928 53c776b5 Iustin Pop
  def _GoStandalone(self):
3929 53c776b5 Iustin Pop
    """Disconnect from the network.
3930 53c776b5 Iustin Pop

3931 53c776b5 Iustin Pop
    """
3932 53c776b5 Iustin Pop
    self.feedback_fn("* changing into standalone mode")
3933 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
3934 53c776b5 Iustin Pop
                                               self.instance.disks)
3935 53c776b5 Iustin Pop
    for node, nres in result.items():
3936 4c4e4e1e Iustin Pop
      nres.Raise("Cannot disconnect disks node %s" % node)
3937 53c776b5 Iustin Pop
3938 53c776b5 Iustin Pop
  def _GoReconnect(self, multimaster):
3939 53c776b5 Iustin Pop
    """Reconnect to the network.
3940 53c776b5 Iustin Pop

3941 53c776b5 Iustin Pop
    """
3942 53c776b5 Iustin Pop
    if multimaster:
3943 53c776b5 Iustin Pop
      msg = "dual-master"
3944 53c776b5 Iustin Pop
    else:
3945 53c776b5 Iustin Pop
      msg = "single-master"
3946 53c776b5 Iustin Pop
    self.feedback_fn("* changing disks into %s mode" % msg)
3947 53c776b5 Iustin Pop
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
3948 53c776b5 Iustin Pop
                                           self.instance.disks,
3949 53c776b5 Iustin Pop
                                           self.instance.name, multimaster)
3950 53c776b5 Iustin Pop
    for node, nres in result.items():
3951 4c4e4e1e Iustin Pop
      nres.Raise("Cannot change disks config on node %s" % node)
3952 53c776b5 Iustin Pop
3953 53c776b5 Iustin Pop
  def _ExecCleanup(self):
3954 53c776b5 Iustin Pop
    """Try to cleanup after a failed migration.
3955 53c776b5 Iustin Pop

3956 53c776b5 Iustin Pop
    The cleanup is done by:
3957 53c776b5 Iustin Pop
      - check that the instance is running only on one node
3958 53c776b5 Iustin Pop
        (and update the config if needed)
3959 53c776b5 Iustin Pop
      - change disks on its secondary node to secondary
3960 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
3961 53c776b5 Iustin Pop
      - disconnect from the network
3962 53c776b5 Iustin Pop
      - change disks into single-master mode
3963 53c776b5 Iustin Pop
      - wait again until disks are fully synchronized
3964 53c776b5 Iustin Pop

3965 53c776b5 Iustin Pop
    """
3966 53c776b5 Iustin Pop
    instance = self.instance
3967 53c776b5 Iustin Pop
    target_node = self.target_node
3968 53c776b5 Iustin Pop
    source_node = self.source_node
3969 53c776b5 Iustin Pop
3970 53c776b5 Iustin Pop
    # check running on only one node
3971 53c776b5 Iustin Pop
    self.feedback_fn("* checking where the instance actually runs"
3972 53c776b5 Iustin Pop
                     " (if this hangs, the hypervisor might be in"
3973 53c776b5 Iustin Pop
                     " a bad state)")
3974 53c776b5 Iustin Pop
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
3975 53c776b5 Iustin Pop
    for node, result in ins_l.items():
3976 4c4e4e1e Iustin Pop
      result.Raise("Can't contact node %s" % node)
3977 53c776b5 Iustin Pop
3978 aca13712 Iustin Pop
    runningon_source = instance.name in ins_l[source_node].payload
3979 aca13712 Iustin Pop
    runningon_target = instance.name in ins_l[target_node].payload
3980 53c776b5 Iustin Pop
3981 53c776b5 Iustin Pop
    if runningon_source and runningon_target:
3982 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance seems to be running on two nodes,"
3983 53c776b5 Iustin Pop
                               " or the hypervisor is confused. You will have"
3984 53c776b5 Iustin Pop
                               " to ensure manually that it runs only on one"
3985 53c776b5 Iustin Pop
                               " and restart this operation.")
3986 53c776b5 Iustin Pop
3987 53c776b5 Iustin Pop
    if not (runningon_source or runningon_target):
3988 53c776b5 Iustin Pop
      raise errors.OpExecError("Instance does not seem to be running at all."
3989 53c776b5 Iustin Pop
                               " In this case, it's safer to repair by"
3990 53c776b5 Iustin Pop
                               " running 'gnt-instance stop' to ensure disk"
3991 53c776b5 Iustin Pop
                               " shutdown, and then restarting it.")
3992 53c776b5 Iustin Pop
3993 53c776b5 Iustin Pop
    if runningon_target:
3994 53c776b5 Iustin Pop
      # the migration has actually succeeded, we need to update the config
3995 53c776b5 Iustin Pop
      self.feedback_fn("* instance running on secondary node (%s),"
3996 53c776b5 Iustin Pop
                       " updating config" % target_node)
3997 53c776b5 Iustin Pop
      instance.primary_node = target_node
3998 53c776b5 Iustin Pop
      self.cfg.Update(instance)
3999 53c776b5 Iustin Pop
      demoted_node = source_node
4000 53c776b5 Iustin Pop
    else:
4001 53c776b5 Iustin Pop
      self.feedback_fn("* instance confirmed to be running on its"
4002 53c776b5 Iustin Pop
                       " primary node (%s)" % source_node)
4003 53c776b5 Iustin Pop
      demoted_node = target_node
4004 53c776b5 Iustin Pop
4005 53c776b5 Iustin Pop
    self._EnsureSecondary(demoted_node)
4006 53c776b5 Iustin Pop
    try:
4007 53c776b5 Iustin Pop
      self._WaitUntilSync()
4008 53c776b5 Iustin Pop
    except errors.OpExecError:
4009 53c776b5 Iustin Pop
      # we ignore here errors, since if the device is standalone, it
4010 53c776b5 Iustin Pop
      # won't be able to sync
4011 53c776b5 Iustin Pop
      pass
4012 53c776b5 Iustin Pop
    self._GoStandalone()
4013 53c776b5 Iustin Pop
    self._GoReconnect(False)
4014 53c776b5 Iustin Pop
    self._WaitUntilSync()
4015 53c776b5 Iustin Pop
4016 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4017 53c776b5 Iustin Pop
4018 6906a9d8 Guido Trotter
  def _RevertDiskStatus(self):
4019 6906a9d8 Guido Trotter
    """Try to revert the disk status after a failed migration.
4020 6906a9d8 Guido Trotter

4021 6906a9d8 Guido Trotter
    """
4022 6906a9d8 Guido Trotter
    target_node = self.target_node
4023 6906a9d8 Guido Trotter
    try:
4024 6906a9d8 Guido Trotter
      self._EnsureSecondary(target_node)
4025 6906a9d8 Guido Trotter
      self._GoStandalone()
4026 6906a9d8 Guido Trotter
      self._GoReconnect(False)
4027 6906a9d8 Guido Trotter
      self._WaitUntilSync()
4028 6906a9d8 Guido Trotter
    except errors.OpExecError, err:
4029 6906a9d8 Guido Trotter
      self.LogWarning("Migration failed and I can't reconnect the"
4030 6906a9d8 Guido Trotter
                      " drives: error '%s'\n"
4031 6906a9d8 Guido Trotter
                      "Please look and recover the instance status" %
4032 6906a9d8 Guido Trotter
                      str(err))
4033 6906a9d8 Guido Trotter
4034 6906a9d8 Guido Trotter
  def _AbortMigration(self):
4035 6906a9d8 Guido Trotter
    """Call the hypervisor code to abort a started migration.
4036 6906a9d8 Guido Trotter

4037 6906a9d8 Guido Trotter
    """
4038 6906a9d8 Guido Trotter
    instance = self.instance
4039 6906a9d8 Guido Trotter
    target_node = self.target_node
4040 6906a9d8 Guido Trotter
    migration_info = self.migration_info
4041 6906a9d8 Guido Trotter
4042 6906a9d8 Guido Trotter
    abort_result = self.rpc.call_finalize_migration(target_node,
4043 6906a9d8 Guido Trotter
                                                    instance,
4044 6906a9d8 Guido Trotter
                                                    migration_info,
4045 6906a9d8 Guido Trotter
                                                    False)
4046 4c4e4e1e Iustin Pop
    abort_msg = abort_result.fail_msg
4047 6906a9d8 Guido Trotter
    if abort_msg:
4048 6906a9d8 Guido Trotter
      logging.error("Aborting migration failed on target node %s: %s" %
4049 6906a9d8 Guido Trotter
                    (target_node, abort_msg))
4050 6906a9d8 Guido Trotter
      # Don't raise an exception here, as we stil have to try to revert the
4051 6906a9d8 Guido Trotter
      # disk status, even if this step failed.
4052 6906a9d8 Guido Trotter
4053 53c776b5 Iustin Pop
  def _ExecMigration(self):
4054 53c776b5 Iustin Pop
    """Migrate an instance.
4055 53c776b5 Iustin Pop

4056 53c776b5 Iustin Pop
    The migrate is done by:
4057 53c776b5 Iustin Pop
      - change the disks into dual-master mode
4058 53c776b5 Iustin Pop
      - wait until disks are fully synchronized again
4059 53c776b5 Iustin Pop
      - migrate the instance
4060 53c776b5 Iustin Pop
      - change disks on the new secondary node (the old primary) to secondary
4061 53c776b5 Iustin Pop
      - wait until disks are fully synchronized
4062 53c776b5 Iustin Pop
      - change disks into single-master mode
4063 53c776b5 Iustin Pop

4064 53c776b5 Iustin Pop
    """
4065 53c776b5 Iustin Pop
    instance = self.instance
4066 53c776b5 Iustin Pop
    target_node = self.target_node
4067 53c776b5 Iustin Pop
    source_node = self.source_node
4068 53c776b5 Iustin Pop
4069 53c776b5 Iustin Pop
    self.feedback_fn("* checking disk consistency between source and target")
4070 53c776b5 Iustin Pop
    for dev in instance.disks:
4071 53c776b5 Iustin Pop
      if not _CheckDiskConsistency(self, dev, target_node, False):
4072 53c776b5 Iustin Pop
        raise errors.OpExecError("Disk %s is degraded or not fully"
4073 53c776b5 Iustin Pop
                                 " synchronized on target node,"
4074 53c776b5 Iustin Pop
                                 " aborting migrate." % dev.iv_name)
4075 53c776b5 Iustin Pop
4076 6906a9d8 Guido Trotter
    # First get the migration information from the remote node
4077 6906a9d8 Guido Trotter
    result = self.rpc.call_migration_info(source_node, instance)
4078 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4079 6906a9d8 Guido Trotter
    if msg:
4080 6906a9d8 Guido Trotter
      log_err = ("Failed fetching source migration information from %s: %s" %
4081 0959c824 Iustin Pop
                 (source_node, msg))
4082 6906a9d8 Guido Trotter
      logging.error(log_err)
4083 6906a9d8 Guido Trotter
      raise errors.OpExecError(log_err)
4084 6906a9d8 Guido Trotter
4085 0959c824 Iustin Pop
    self.migration_info = migration_info = result.payload
4086 6906a9d8 Guido Trotter
4087 6906a9d8 Guido Trotter
    # Then switch the disks to master/master mode
4088 53c776b5 Iustin Pop
    self._EnsureSecondary(target_node)
4089 53c776b5 Iustin Pop
    self._GoStandalone()
4090 53c776b5 Iustin Pop
    self._GoReconnect(True)
4091 53c776b5 Iustin Pop
    self._WaitUntilSync()
4092 53c776b5 Iustin Pop
4093 6906a9d8 Guido Trotter
    self.feedback_fn("* preparing %s to accept the instance" % target_node)
4094 6906a9d8 Guido Trotter
    result = self.rpc.call_accept_instance(target_node,
4095 6906a9d8 Guido Trotter
                                           instance,
4096 6906a9d8 Guido Trotter
                                           migration_info,
4097 6906a9d8 Guido Trotter
                                           self.nodes_ip[target_node])
4098 6906a9d8 Guido Trotter
4099 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4100 6906a9d8 Guido Trotter
    if msg:
4101 6906a9d8 Guido Trotter
      logging.error("Instance pre-migration failed, trying to revert"
4102 6906a9d8 Guido Trotter
                    " disk status: %s", msg)
4103 6906a9d8 Guido Trotter
      self._AbortMigration()
4104 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
4105 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
4106 6906a9d8 Guido Trotter
                               (instance.name, msg))
4107 6906a9d8 Guido Trotter
4108 53c776b5 Iustin Pop
    self.feedback_fn("* migrating instance to %s" % target_node)
4109 53c776b5 Iustin Pop
    time.sleep(10)
4110 53c776b5 Iustin Pop
    result = self.rpc.call_instance_migrate(source_node, instance,
4111 53c776b5 Iustin Pop
                                            self.nodes_ip[target_node],
4112 53c776b5 Iustin Pop
                                            self.op.live)
4113 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4114 53c776b5 Iustin Pop
    if msg:
4115 53c776b5 Iustin Pop
      logging.error("Instance migration failed, trying to revert"
4116 53c776b5 Iustin Pop
                    " disk status: %s", msg)
4117 6906a9d8 Guido Trotter
      self._AbortMigration()
4118 6906a9d8 Guido Trotter
      self._RevertDiskStatus()
4119 53c776b5 Iustin Pop
      raise errors.OpExecError("Could not migrate instance %s: %s" %
4120 53c776b5 Iustin Pop
                               (instance.name, msg))
4121 53c776b5 Iustin Pop
    time.sleep(10)
4122 53c776b5 Iustin Pop
4123 53c776b5 Iustin Pop
    instance.primary_node = target_node
4124 53c776b5 Iustin Pop
    # distribute new instance config to the other nodes
4125 53c776b5 Iustin Pop
    self.cfg.Update(instance)
4126 53c776b5 Iustin Pop
4127 6906a9d8 Guido Trotter
    result = self.rpc.call_finalize_migration(target_node,
4128 6906a9d8 Guido Trotter
                                              instance,
4129 6906a9d8 Guido Trotter
                                              migration_info,
4130 6906a9d8 Guido Trotter
                                              True)
4131 4c4e4e1e Iustin Pop
    msg = result.fail_msg
4132 6906a9d8 Guido Trotter
    if msg:
4133 6906a9d8 Guido Trotter
      logging.error("Instance migration succeeded, but finalization failed:"
4134 6906a9d8 Guido Trotter
                    " %s" % msg)
4135 6906a9d8 Guido Trotter
      raise errors.OpExecError("Could not finalize instance migration: %s" %
4136 6906a9d8 Guido Trotter
                               msg)
4137 6906a9d8 Guido Trotter
4138 53c776b5 Iustin Pop
    self._EnsureSecondary(source_node)
4139 53c776b5 Iustin Pop
    self._WaitUntilSync()
4140 53c776b5 Iustin Pop
    self._GoStandalone()
4141 53c776b5 Iustin Pop
    self._GoReconnect(False)
4142 53c776b5 Iustin Pop
    self._WaitUntilSync()
4143 53c776b5 Iustin Pop
4144 53c776b5 Iustin Pop
    self.feedback_fn("* done")
4145 53c776b5 Iustin Pop
4146 53c776b5 Iustin Pop
  def Exec(self, feedback_fn):
4147 53c776b5 Iustin Pop
    """Perform the migration.
4148 53c776b5 Iustin Pop

4149 53c776b5 Iustin Pop
    """
4150 53c776b5 Iustin Pop
    self.feedback_fn = feedback_fn
4151 53c776b5 Iustin Pop
4152 53c776b5 Iustin Pop
    self.source_node = self.instance.primary_node
4153 53c776b5 Iustin Pop
    self.target_node = self.instance.secondary_nodes[0]
4154 53c776b5 Iustin Pop
    self.all_nodes = [self.source_node, self.target_node]
4155 53c776b5 Iustin Pop
    self.nodes_ip = {
4156 53c776b5 Iustin Pop
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
4157 53c776b5 Iustin Pop
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
4158 53c776b5 Iustin Pop
      }
4159 53c776b5 Iustin Pop
    if self.op.cleanup:
4160 53c776b5 Iustin Pop
      return self._ExecCleanup()
4161 53c776b5 Iustin Pop
    else:
4162 53c776b5 Iustin Pop
      return self._ExecMigration()
4163 53c776b5 Iustin Pop
4164 53c776b5 Iustin Pop
4165 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  The tree is walked depth-first: children are handled before the
  device itself.  A device which reports CreateOnSecondary() switches
  creation on for itself and, since the flag is propagated down the
  recursion, for its whole subtree; otherwise 'force_create' decides
  whether anything is actually created.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the root of the device tree to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be change to True whenever we find a device which has
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passes to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution

  """
  if device.CreateOnSecondary():
    force_create = True

  for child in (device.children or []):
    _CreateBlockDev(lu, node, instance, child, force_create,
                    info, force_open)

  if force_create:
    _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
4205 de12473a Iustin Pop
4206 de12473a Iustin Pop
4207 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
4208 de12473a Iustin Pop
  """Create a single block device on a given node.
4209 de12473a Iustin Pop

4210 de12473a Iustin Pop
  This will not recurse over children of the device, so they must be
4211 de12473a Iustin Pop
  created in advance.
4212 de12473a Iustin Pop

4213 de12473a Iustin Pop
  @param lu: the lu on whose behalf we execute
4214 de12473a Iustin Pop
  @param node: the node on which to create the device
4215 de12473a Iustin Pop
  @type instance: L{objects.Instance}
4216 de12473a Iustin Pop
  @param instance: the instance which owns the device
4217 de12473a Iustin Pop
  @type device: L{objects.Disk}
4218 de12473a Iustin Pop
  @param device: the device to create
4219 de12473a Iustin Pop
  @param info: the extra 'metadata' we should attach to the device
4220 de12473a Iustin Pop
      (this will be represented as a LVM tag)
4221 de12473a Iustin Pop
  @type force_open: boolean
4222 de12473a Iustin Pop
  @param force_open: this parameter will be passes to the
4223 821d1bd1 Iustin Pop
      L{backend.BlockdevCreate} function where it specifies
4224 de12473a Iustin Pop
      whether we run on primary or not, and it affects both
4225 de12473a Iustin Pop
      the child assembly and the device own Open() execution
4226 de12473a Iustin Pop

4227 de12473a Iustin Pop
  """
4228 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(device, node)
4229 7d81697f Iustin Pop
  result = lu.rpc.call_blockdev_create(node, device, device.size,
4230 428958aa Iustin Pop
                                       instance.name, force_open, info)
4231 4c4e4e1e Iustin Pop
  result.Raise("Can't create block device %s on"
4232 4c4e4e1e Iustin Pop
               " node %s for instance %s" % (device, node, instance.name))
4233 a8083063 Iustin Pop
  if device.physical_id is None:
4234 0959c824 Iustin Pop
    device.physical_id = result.payload
4235 a8083063 Iustin Pop
4236 a8083063 Iustin Pop
4237 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
4238 923b1523 Iustin Pop
  """Generate a suitable LV name.
4239 923b1523 Iustin Pop

4240 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
4241 923b1523 Iustin Pop

4242 923b1523 Iustin Pop
  """
4243 923b1523 Iustin Pop
  results = []
4244 923b1523 Iustin Pop
  for val in exts:
4245 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
4246 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
4247 923b1523 Iustin Pop
  return results
4248 923b1523 Iustin Pop
4249 923b1523 Iustin Pop
4250 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  @param lu: the lu on whose behalf we execute
  @param primary: the primary node name
  @param secondary: the secondary node name
  @type size: int
  @param size: size of the data volume, in mebibytes
  @type names: list
  @param names: the two LV names, C{names[0]} for data, C{names[1]} for meta
  @param iv_name: the instance-visible name of the DRBD device
  @param p_minor: DRBD minor reserved on the primary node
  @param s_minor: DRBD minor reserved on the secondary node
  @rtype: L{objects.Disk}
  @return: the assembled DRBD8 disk object

  """
  # cluster-wide resources needed by the DRBD device
  drbd_port = lu.cfg.AllocatePort()
  vg_name = lu.cfg.GetVGName()
  secret = lu.cfg.GenerateDRBDSecret()
  # the two backing logical volumes; the metadata one has a fixed
  # 128 MB size
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vg_name, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vg_name, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, drbd_port,
                                  p_minor, s_minor,
                                  secret),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
4269 a1f445d3 Iustin Pop
4270 7c0d6283 Michael Hanselmann
4271 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  @param lu: the lu on whose behalf we execute
  @type template_name: string
  @param template_name: one of the constants.DISK_TEMPLATES values
  @type instance_name: string
  @param instance_name: used when reserving DRBD minors
  @param primary_node: the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: must be empty, except for DRBD8 which needs
      exactly one secondary node
  @type disk_info: list of dicts
  @param disk_info: one dict per disk, each with at least "size" and
      "mode" keys
  @param file_storage_dir: base directory for file-based disks
      (only used for DT_FILE)
  @param file_driver: driver for file-based disks (only used for DT_FILE)
  @type base_index: int
  @param base_index: offset added to the per-call disk index when
      building the "disk/N" iv_names (non-zero when adding disks to an
      existing instance)
  @rtype: list of L{objects.Disk}
  @return: the generated disk objects
  @raise errors.ProgrammerError: for an unknown template name or a
      template/secondary-node-count mismatch

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % i
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # one minor per node per disk, allocated pairwise (primary, secondary)
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    # each disk needs a data and a meta LV, so two names per disk
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % i
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
4335 a8083063 Iustin Pop
4336 a8083063 Iustin Pop
4337 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4338 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4339 3ecf6786 Iustin Pop

4340 3ecf6786 Iustin Pop
  """
4341 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
4342 a0c3fea1 Michael Hanselmann
4343 a0c3fea1 Michael Hanselmann
4344 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @raise errors.OpExecError: (via result.Raise) if the file storage
      directory or a block device cannot be created

  """
  info = _GetInstanceInfoText(instance)
  pnode = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    # all file-based disks of one instance live in a single directory
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    # FIX: the message previously contained three %s placeholders for
    # only two arguments, so the % formatting itself raised TypeError
    # whenever a file-based instance was created
    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      # devices are only opened (force_open) on the primary node
      f_create = node == pnode
      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
4376 a8083063 Iustin Pop
4377 a8083063 Iustin Pop
4378 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  clean = True
  # walk the full device tree (data and children alike) and remove each
  # piece on its node; a failure is logged but does not stop the rest
  for disk_obj in instance.disks:
    for dev_node, sub_disk in disk_obj.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(sub_disk, dev_node)
      err = lu.rpc.call_blockdev_remove(dev_node, sub_disk).fail_msg
      if err:
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s",
                      disk_obj.iv_name, dev_node, err)
        clean = False

  # file-based instances also keep a per-instance directory on the
  # primary node which must be removed
  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    rem_result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                     file_storage_dir)
    err = rem_result.fail_msg
    if err:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, instance.primary_node, err)
      clean = False

  return clean
4417 a8083063 Iustin Pop
4418 a8083063 Iustin Pop
4419 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  @param disk_template: one of the constants.DT_* values
  @type disks: list of dicts
  @param disks: disk specifications, each with a "size" key
  @return: required free space in the volume group, in mebibytes, or
      None for templates that do not consume VG space
  @raise errors.ProgrammerError: for an unknown disk template

  """
  # Required free disk space as a function of disk and swap space;
  # note that all entries are computed eagerly, on purpose, so that a
  # malformed disk dict fails regardless of the chosen template
  template_to_size = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: sum(spec["size"] for spec in disks),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: sum(spec["size"] + 128 for spec in disks),
    constants.DT_FILE: None,
    }

  if disk_template not in template_to_size:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" %  disk_template)

  return template_to_size[disk_template]
4437 e2fe6369 Iustin Pop
4438 e2fe6369 Iustin Pop
4439 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
4440 74409b12 Iustin Pop
  """Hypervisor parameter validation.
4441 74409b12 Iustin Pop

4442 74409b12 Iustin Pop
  This function abstract the hypervisor parameter validation to be
4443 74409b12 Iustin Pop
  used in both instance create and instance modify.
4444 74409b12 Iustin Pop

4445 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
4446 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
4447 74409b12 Iustin Pop
  @type nodenames: list
4448 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
4449 74409b12 Iustin Pop
  @type hvname: string
4450 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
4451 74409b12 Iustin Pop
  @type hvparams: dict
4452 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
4453 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
4454 74409b12 Iustin Pop

4455 74409b12 Iustin Pop
  """
4456 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4457 74409b12 Iustin Pop
                                                  hvname,
4458 74409b12 Iustin Pop
                                                  hvparams)
4459 74409b12 Iustin Pop
  for node in nodenames:
4460 781de953 Iustin Pop
    info = hvinfo[node]
4461 68c6f21c Iustin Pop
    if info.offline:
4462 68c6f21c Iustin Pop
      continue
4463 4c4e4e1e Iustin Pop
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
4464 74409b12 Iustin Pop
4465 74409b12 Iustin Pop
4466 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
4467 a8083063 Iustin Pop
  """Create an instance.
4468 a8083063 Iustin Pop

4469 a8083063 Iustin Pop
  """
4470 a8083063 Iustin Pop
  HPATH = "instance-add"
4471 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4472 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
4473 08db7c5c Iustin Pop
              "mode", "start",
4474 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
4475 338e51e8 Iustin Pop
              "hvparams", "beparams"]
4476 7baf741d Guido Trotter
  REQ_BGL = False
4477 7baf741d Guido Trotter
4478 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Expands and checks one node name.

    @type node: string
    @param node: the (possibly short) node name to expand
    @rtype: string
    @return: the expanded (full) node name
    @raise errors.OpPrereqError: if the node is not known

    """
    expanded = self.cfg.ExpandNodeName(node)
    if expanded is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return expanded
4486 7baf741d Guido Trotter
4487 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    Validates (locally) the opcode parameters: mode, disk template,
    hypervisor and backend parameters, instance name, NICs and disks,
    and computes the node locks (explicit nodes, or all nodes when an
    iallocator will choose them).  Fills self.hv_full, self.be_full,
    self.nics, self.disks and self.check_ip for later phases.

    @raise errors.OpPrereqError: for any invalid opcode parameter

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for idx, nic in enumerate(self.op.nics):
      nic_mode_req = nic.get("mode", None)
      nic_mode = nic_mode_req
      if nic_mode is None:
        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

      # in routed mode, for the first nic, the default ip is 'auto'
      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
        default_ip_mode = constants.VALUE_AUTO
      else:
        default_ip_mode = constants.VALUE_NONE

      # ip validity checks
      ip = nic.get("ip", default_ip_mode)
      if ip is None or ip.lower() == constants.VALUE_NONE:
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        # 'auto' resolves to the instance's own primary IP
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # TODO: check the ip for uniqueness !!
      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
        raise errors.OpPrereqError("Routed nic mode requires an ip address")

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
      # bridge verification
      # 'bridge' is the legacy spelling of 'link'; they are exclusive
      bridge = nic.get("bridge", None)
      link = nic.get("link", None)
      if bridge and link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time")
      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic")
      elif bridge:
        link = bridge

      nicparams = {}
      if nic_mode_req:
        nicparams[constants.NIC_MODE] = nic_mode_req
      if link:
        nicparams[constants.NIC_LINK] = link

      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
                                      nicparams)
      objects.NIC.CheckParameterSyntax(check_params)
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # node choice deferred to the allocator: lock everything
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # unknown source node: search all nodes, so lock them all
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          # relative paths are resolved against the cluster export dir
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
4675 a8083063 Iustin Pop
4676 538475ca Iustin Pop
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success, sets self.op.pnode (and self.op.snode when two nodes
    were requested) from the allocator's answer.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    nic_dicts = [nic.ToDict() for nic in self.nics]
    ialloc = IAllocator(self,
                        mode=constants.IALLOCATOR_MODE_ALLOC,
                        name=self.op.instance_name,
                        disk_template=self.op.disk_template,
                        tags=[],
                        os=self.op.os_type,
                        vcpus=self.be_full[constants.BE_VCPUS],
                        mem_size=self.be_full[constants.BE_MEMORY],
                        disks=self.disks,
                        nics=nic_dicts,
                        hypervisor=self.op.hypervisor,
                        )

    ialloc.Run(self.op.iallocator)

    if not ialloc.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ialloc.info))
    if len(ialloc.nodes) != ialloc.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ialloc.nodes),
                                  ialloc.required_nodes))

    self.op.pnode = ialloc.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(ialloc.nodes))
    if ialloc.required_nodes == 2:
      self.op.snode = ialloc.nodes[1]
4711 538475ca Iustin Pop
4712 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build the environment for the instance-creation hooks.

    The hooks run on the master node and on the primary and secondary
    nodes of the new instance.

    """
    env = {"ADD_MODE": self.op.mode}
    if self.op.mode == constants.INSTANCE_IMPORT:
      # imports additionally expose where the data comes from
      env.update({
        "SRC_NODE": self.op.src_node,
        "SRC_PATH": self.op.src_path,
        "SRC_IMAGES": self.src_images,
        })

    instance_env = _BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=_NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d["size"], d["mode"]) for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor=self.op.hypervisor,
      )
    env.update(instance_env)

    # the hooks run on the master plus all instance nodes
    node_list = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return env, node_list, node_list
4745 a8083063 Iustin Pop
4746 a8083063 Iustin Pop
4747 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Verifies the import source (if any), IP conflicts, the primary and
    secondary nodes, free disk space, hypervisor parameters, OS
    availability and free memory; also generates the final NIC MAC
    addresses and runs the iallocator when one was requested.

    """
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_node is None:
        # the source was given as a path relative to EXPORT_DIR: search
        # the export lists of all locked nodes for it
        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
        exp_list = self.rpc.call_export_list(locked_nodes)
        found = False
        for node in exp_list:
          if exp_list[node].fail_msg:
            # best-effort: skip nodes whose export-list RPC failed
            continue
          if src_path in exp_list[node].payload:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise("No export or invalid export found in dir %s" % src_path)

      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          # no dump for this disk index; False tells the import code to
          # leave the disk empty
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          # reuse exported MACs only for NIC indices that actually exist
          # in the export (0..nic_count-1); the previous check used
          # "exp_nic_count >= idx", which was off by one and would have
          # requested a missing nic%d_mac option
          if nic.mac == constants.VALUE_AUTO and idx < exp_nic_count:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise("Cannot get current information from node %s" % node)
        info = info.payload
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > vg_free:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, vg_free, req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise("OS '%s' not in supported os list for primary node %s" %
                 (self.op.os_type, pnode.name), prereq=True)

    _CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)

    self.dry_run_result = list(nodenames)
4915 08896026 Iustin Pop
4916 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    @param feedback_fn: callable used to report progress back to the job

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # allocate a cluster-unique network port only for hypervisor types
    # that require one
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    # the instance is created stopped (admin_up=False) and only marked
    # running further down, after the OS has been set up
    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      # roll back any devices already created and free the reserved DRBD
      # minors; the bare raise in the finally clause re-raises the
      # original OpExecError
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      # keep the lock on the source node until the import below is done
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo the creation and fail the job
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
        result.Raise("Could not add os for instance %s"
                     " on node %s" % (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        # import failures are only warned about: the instance exists at
        # this point even if its data could not be restored
        msg = import_result.fail_msg
        if msg:
          self.LogWarning("Error while importing the disk images for instance"
                          " %s on node %s: %s" % (instance, pnode_name, msg))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      iobj.admin_up = True
      self.cfg.Update(iobj)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      # NOTE(review): the two trailing None arguments look like optional
      # hv/be parameter overrides for the start RPC - confirm against
      # rpc.call_instance_start's signature
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)
5046 08896026 Iustin Pop
5047 a8083063 Iustin Pop
5048 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that its
    primary node is online.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Build the ssh command line that attaches to the console.

    """
    inst = self.instance
    pnode = inst.primary_node

    # a single node and a single hypervisor are queried, so the result
    # can be indexed directly by the primary node name
    running = self.rpc.call_instance_list([pnode],
                                          [inst.hypervisor])[pnode]
    running.Raise("Can't get node information from %s" % pnode)

    if inst.name not in running.payload:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logging.debug("Connecting to console of %s on %s", inst.name, pnode)

    hyper = hypervisor.GetHypervisor(inst.hypervisor)
    cluster = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    console_cmd = hyper.GetShellCommandForConsole(inst,
                                                  cluster.FillHV(inst),
                                                  cluster.FillBE(inst))

    # build ssh cmdline
    return self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True, tty=True)
5099 a8083063 Iustin Pop
5100 a8083063 Iustin Pop
5101 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters that must be present (checked by the LU machinery)
  _OP_REQP = ["instance_name", "mode", "disks"]
  # we compute our own fine-grained locks in ExpandNames instead of
  # taking the big ganeti lock
  REQ_BGL = False
5109 efd990e4 Guido Trotter
5110 7e9366f7 Iustin Pop
  def CheckArguments(self):
    """Default the optional opcode fields and validate their combination.

    Exactly one of remote_node/iallocator must be given when changing
    the secondary; neither may be given otherwise.

    """
    # default the optional parameters when the opcode omits them
    for attr in ("remote_node", "iallocator"):
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # check for valid parameter combination
    unset = [self.op.remote_node, self.op.iallocator].count(None)
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if unset == 2:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")
      if unset == 0:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
    elif unset != 2:
      # not replacing the secondary, so neither option makes sense
      raise errors.OpPrereqError("The iallocator and new node options can"
                                 " be used only when changing the"
                                 " secondary node")
5131 7e9366f7 Iustin Pop
5132 7e9366f7 Iustin Pop
  def ExpandNames(self):
    """Compute the instance and node locks needed for the replacement.

    With an iallocator the new secondary is only chosen later (in
    CheckPrereq via _RunAllocator), so all nodes must be locked; with an
    explicit remote node only that node is pre-locked here.

    """
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      # the target secondary is unknown until the allocator runs
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      # store the expanded (canonical) name back into the opcode
      self.op.remote_node = remote_node
      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      # no explicit node: the instance's own nodes will be added in
      # DeclareLocks via _LockInstancesNodes
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5152 efd990e4 Guido Trotter
5153 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Add the instance's own nodes to the node-level lock set.

    """
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if level != locking.LEVEL_NODE:
      return
    if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
      self._LockInstancesNodes()
5159 a8083063 Iustin Pop
5160 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    Runs the configured iallocator script in relocation mode and, on
    success, stores the selected node in self.op.remote_node.

    Raises:
      errors.OpPrereqError: if the allocator fails or returns a wrong
        number of nodes

    """
    ial = IAllocator(self,
                     mode=constants.IALLOCATOR_MODE_RELOC,
                     name=self.op.instance_name,
                     relocate_from=[self.sec_node])

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           ial.info))
    if len(ial.nodes) != ial.required_nodes:
      # BUGFIX: the format string has three placeholders but only two
      # arguments were supplied, so this branch raised TypeError instead
      # of the intended OpPrereqError; the allocator name was missing
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(ial.nodes),
                                  ial.required_nodes))
    self.op.remote_node = ial.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)
5182 b6e82a65 Iustin Pop
5183 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    # note: the per-instance env from _BuildInstanceHookEnvByObject is
    # applied last on purpose, so it wins over the keys set here
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    # same node list for both pre- and post-hooks
    return env, node_list, node_list
5202 a8083063 Iustin Pop
5203 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    # only DRBD8 instances have a secondary whose disks can be replaced
    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # the allocator fills in self.op.remote_node for us
    if self.op.iallocator is not None:
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is not None:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    # select the target node (whose storage is replaced/created), the
    # "other" healthy peer, and -- for CHG mode -- the new secondary;
    # n1/n2 are the two nodes whose liveness is checked below
    if self.op.mode == constants.REPLACE_DISK_PRI:
      n1 = self.tgt_node = instance.primary_node
      n2 = self.oth_node = self.sec_node
    elif self.op.mode == constants.REPLACE_DISK_SEC:
      n1 = self.tgt_node = self.sec_node
      n2 = self.oth_node = instance.primary_node
    elif self.op.mode == constants.REPLACE_DISK_CHG:
      n1 = self.new_node = remote_node
      n2 = self.oth_node = instance.primary_node
      self.tgt_node = self.sec_node
      _CheckNodeNotDrained(self, remote_node)
    else:
      raise errors.ProgrammerError("Unhandled disk replace mode")

    _CheckNodeOnline(self, n1)
    _CheckNodeOnline(self, n2)

    # an empty disk list means "replace all disks"
    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    # FindDisk raises for invalid indices, validating the request
    for disk_idx in self.op.disks:
      instance.FindDisk(disk_idx)
5264 a8083063 Iustin Pop
5265 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps iv_name -> (drbd disk, old LV children, newly created LVs)
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node holds the storage being replaced; oth_node is the peer
    # (both were chosen in CheckPrereq)
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results[node]
      res.Raise("Error checking node %s" % node)
      if my_vg not in res.payload:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      # each selected disk must be visible on both nodes
      for node in tgt_node, oth_node:
        info("checking disk/%d on %s" % (idx, node))
        cfg.SetDiskID(dev, node)
        result = self.rpc.call_blockdev_find(node, dev)
        msg = result.fail_msg
        # an empty payload with no error also means the device is missing
        if not msg and not result.payload:
          msg = "disk not found"
        if msg:
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".disk%d_%s" % (idx, suf)
                  for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # NOTE(review): meta LV size is hard-coded to 128 here
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
      result.Raise("Can't detach drbd from local storage on node"
                   " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if not result.fail_msg and result.payload:
          # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise("Can't rename new LVs on node %s" % tgt_node)

      # record the swapped names in the configuration objects as well
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
      msg = result.fail_msg
      if msg:
        # best-effort rollback: try to delete the LVs we just created
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(tgt_node, new_lv).fail_msg
          if msg2:
            warning("Can't rollback device %s: %s", dev, msg2,
                    hint="cleanup manually the unused logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = result.fail_msg
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))
      # the sixth field of the find result flags a degraded device
      if result.payload[5]:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # removal failures are only warned about, not fatal
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).fail_msg
        if msg:
          warning("Can't remove old LV: %s" % msg,
                  hint="manually remove unused LVs")
          continue
5452 a9e0c397 Iustin Pop
5453 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
5454 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
5455 a9e0c397 Iustin Pop

5456 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5457 a9e0c397 Iustin Pop
      - for all disks of the instance:
5458 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
5459 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
5460 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
5461 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
5462 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
5463 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
5464 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
5465 a9e0c397 Iustin Pop
          not network enabled
5466 a9e0c397 Iustin Pop
      - wait for sync across all devices
5467 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
5468 a9e0c397 Iustin Pop

5469 a9e0c397 Iustin Pop
    Failures are not very well handled.
5470 0834c866 Iustin Pop

5471 a9e0c397 Iustin Pop
    """
5472 0834c866 Iustin Pop
    steps_total = 6
5473 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5474 a9e0c397 Iustin Pop
    instance = self.instance
5475 a9e0c397 Iustin Pop
    iv_names = {}
5476 a9e0c397 Iustin Pop
    # start of work
5477 a9e0c397 Iustin Pop
    cfg = self.cfg
5478 a9e0c397 Iustin Pop
    old_node = self.tgt_node
5479 a9e0c397 Iustin Pop
    new_node = self.new_node
5480 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
5481 a2d59d8b Iustin Pop
    nodes_ip = {
5482 a2d59d8b Iustin Pop
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
5483 a2d59d8b Iustin Pop
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
5484 a2d59d8b Iustin Pop
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
5485 a2d59d8b Iustin Pop
      }
5486 0834c866 Iustin Pop
5487 0834c866 Iustin Pop
    # Step: check device activation
5488 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5489 0834c866 Iustin Pop
    info("checking volume groups")
5490 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
5491 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
5492 0834c866 Iustin Pop
    for node in pri_node, new_node:
5493 781de953 Iustin Pop
      res = results[node]
5494 4c4e4e1e Iustin Pop
      res.Raise("Error checking node %s" % node)
5495 e480923b Iustin Pop
      if my_vg not in res.payload:
5496 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5497 0834c866 Iustin Pop
                                 (my_vg, node))
5498 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5499 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5500 0834c866 Iustin Pop
        continue
5501 d418ebfb Iustin Pop
      info("checking disk/%d on %s" % (idx, pri_node))
5502 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5503 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5504 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5505 23829f6f Iustin Pop
      if not msg and not result.payload:
5506 23829f6f Iustin Pop
        msg = "disk not found"
5507 23829f6f Iustin Pop
      if msg:
5508 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5509 23829f6f Iustin Pop
                                 (idx, pri_node, msg))
5510 0834c866 Iustin Pop
5511 0834c866 Iustin Pop
    # Step: check other node consistency
5512 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5513 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5514 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5515 0834c866 Iustin Pop
        continue
5516 d418ebfb Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, pri_node))
5517 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
5518 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
5519 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
5520 0834c866 Iustin Pop
                                 pri_node)
5521 0834c866 Iustin Pop
5522 0834c866 Iustin Pop
    # Step: create new storage
5523 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5524 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5525 d418ebfb Iustin Pop
      info("adding new local storage on %s for disk/%d" %
5526 d418ebfb Iustin Pop
           (new_node, idx))
5527 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
5528 a9e0c397 Iustin Pop
      for new_lv in dev.children:
5529 428958aa Iustin Pop
        _CreateBlockDev(self, new_node, instance, new_lv, True,
5530 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5531 a9e0c397 Iustin Pop
5532 468b46f9 Iustin Pop
    # Step 4: dbrd minors and drbd setups changes
5533 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
5534 a1578d63 Iustin Pop
    # error and the success paths
5535 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
5536 a1578d63 Iustin Pop
                                   instance.name)
5537 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
5538 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
5539 d418ebfb Iustin Pop
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
5540 d418ebfb Iustin Pop
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
5541 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
5542 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
5543 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
5544 a2d59d8b Iustin Pop
      # with network, for the latter activation in step 4
5545 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5546 a2d59d8b Iustin Pop
      if pri_node == o_node1:
5547 a2d59d8b Iustin Pop
        p_minor = o_minor1
5548 ffa1c0dc Iustin Pop
      else:
5549 a2d59d8b Iustin Pop
        p_minor = o_minor2
5550 a2d59d8b Iustin Pop
5551 a2d59d8b Iustin Pop
      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
5552 a2d59d8b Iustin Pop
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
5553 a2d59d8b Iustin Pop
5554 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
5555 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5556 a2d59d8b Iustin Pop
                    new_net_id)
5557 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5558 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
5559 8a6c7011 Iustin Pop
                              children=dev.children,
5560 8a6c7011 Iustin Pop
                              size=dev.size)
5561 796cab27 Iustin Pop
      try:
5562 de12473a Iustin Pop
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
5563 de12473a Iustin Pop
                              _GetInstanceInfoText(instance), False)
5564 82759cb1 Iustin Pop
      except errors.GenericError:
5565 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5566 796cab27 Iustin Pop
        raise
5567 a9e0c397 Iustin Pop
5568 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5569 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
5570 d418ebfb Iustin Pop
      info("shutting down drbd for disk/%d on old node" % idx)
5571 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
5572 4c4e4e1e Iustin Pop
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).fail_msg
5573 cacfd1fd Iustin Pop
      if msg:
5574 cacfd1fd Iustin Pop
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
5575 cacfd1fd Iustin Pop
                (idx, msg),
5576 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
5577 a9e0c397 Iustin Pop
5578 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
5579 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
5580 a2d59d8b Iustin Pop
                                               instance.disks)[pri_node]
5581 642445d9 Iustin Pop
5582 4c4e4e1e Iustin Pop
    msg = result.fail_msg
5583 a2d59d8b Iustin Pop
    if msg:
5584 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
5585 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
5586 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
5587 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
5588 642445d9 Iustin Pop
5589 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
5590 642445d9 Iustin Pop
    # the instance to point to the new secondary
5591 642445d9 Iustin Pop
    info("updating instance configuration")
5592 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
5593 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
5594 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5595 642445d9 Iustin Pop
    cfg.Update(instance)
5596 a9e0c397 Iustin Pop
5597 642445d9 Iustin Pop
    # and now perform the drbd attach
5598 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
5599 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
5600 a2d59d8b Iustin Pop
                                           instance.disks, instance.name,
5601 a2d59d8b Iustin Pop
                                           False)
5602 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
5603 4c4e4e1e Iustin Pop
      msg = to_result.fail_msg
5604 a2d59d8b Iustin Pop
      if msg:
5605 a2d59d8b Iustin Pop
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
5606 a2d59d8b Iustin Pop
                hint="please do a gnt-instance info to see the"
5607 a2d59d8b Iustin Pop
                " status of disks")
5608 a9e0c397 Iustin Pop
5609 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5610 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5611 a9e0c397 Iustin Pop
    # return value
5612 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5613 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5614 a9e0c397 Iustin Pop
5615 a9e0c397 Iustin Pop
    # so check manually all the devices
5616 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5617 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5618 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5619 4c4e4e1e Iustin Pop
      msg = result.fail_msg
5620 23829f6f Iustin Pop
      if not msg and not result.payload:
5621 23829f6f Iustin Pop
        msg = "disk not found"
5622 23829f6f Iustin Pop
      if msg:
5623 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
5624 23829f6f Iustin Pop
                                 (idx, msg))
5625 23829f6f Iustin Pop
      if result.payload[5]:
5626 d418ebfb Iustin Pop
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
5627 a9e0c397 Iustin Pop
5628 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5629 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5630 d418ebfb Iustin Pop
      info("remove logical volumes for disk/%d" % idx)
5631 a9e0c397 Iustin Pop
      for lv in old_lvs:
5632 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
5633 4c4e4e1e Iustin Pop
        msg = self.rpc.call_blockdev_remove(old_node, lv).fail_msg
5634 e1bc0878 Iustin Pop
        if msg:
5635 e1bc0878 Iustin Pop
          warning("Can't remove LV on old secondary: %s", msg,
5636 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
5637 a9e0c397 Iustin Pop
5638 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance

    # A down instance has its disks deactivated; bring them up first so
    # the replacement can actually operate on them.
    if not instance.admin_up:
      _StartInstanceDisks(self, instance, True)

    # Pick the handler matching the requested replacement mode.
    handler = (self._ExecD8Secondary
               if self.op.mode == constants.REPLACE_DISK_CHG
               else self._ExecD8DiskOnly)

    result = handler(feedback_fn)

    # Undo the temporary disk activation done above for down instances.
    if not instance.admin_up:
      _SafeShutdownInstanceDisks(self, instance)

    return result
5662 a9e0c397 Iustin Pop
5663 a8083063 Iustin Pop
5664 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and declare the instance lock; node locks follow later."""
    self._ExpandAndLockInstance()
    # the node list can only be computed once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the locks of the instance's nodes at the node level."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      }
    # instance-derived variables take precedence over the two above
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      _CheckNodeOnline(self, node)

    self.instance = instance

    # only lvm-backed templates can be grown
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing.")

    self.disk = instance.FindDisk(self.op.disk)

    # every node touching the disk must have enough free space in the VG
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                       instance.hypervisor)
    for node in nodenames:
      node_result = nodeinfo[node]
      node_result.Raise("Cannot get current information from node %s" % node)
      free_space = node_result.payload.get('vg_free', None)
      if not isinstance(free_space, int):
        raise errors.OpPrereqError("Can't compute free disk space on"
                                   " node %s" % node)
      if self.op.amount > free_space:
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
                                   " %d MiB available, %d MiB required" %
                                   (node, free_space, self.op.amount))

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk
    # grow the device on every node holding it, then record the new size
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      grow_result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
      grow_result.Raise("Grow request failed to node %s" % node)
    disk.RecordGrow(self.op.amount)
    self.cfg.Update(instance)
    if self.op.wait_for_sync and not _WaitForSync(self, instance):
      self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
                           " status.\nPlease check the instance.")
5752 8729e0d7 Iustin Pop
5753 8729e0d7 Iustin Pop
5754 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  Returns, per requested instance, its configured/runtime state and a
  recursive description of its block devices.

  """
  _OP_REQP = ["instances", "static"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand instance names and declare (shared) locks."""
    self.needed_locks = {}
    # all locks are acquired in shared mode; this LU only reads state
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if self.op.instances:
      # explicit list given: expand each name and lock only those
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
    else:
      # no list given: query all instances; names resolved in CheckPrereq
      self.wanted_names = None
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Lock the nodes of the selected instances at the node level."""
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      # "all instances" case: the acquired locks are the instance names
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                             in self.wanted_names]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing ``dev``, querying the primary (and, for
    DRBD, the secondary) node via RPC unless the query is static;
    children are described recursively.

    """
    static = self.op.static
    if not static:
      self.cfg.SetDiskID(dev, instance.primary_node)
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
      # an offline primary node yields no status rather than an error
      if dev_pstatus.offline:
        dev_pstatus = None
      else:
        dev_pstatus.Raise("Can't compute disk status for %s" % instance.name)
        dev_pstatus = dev_pstatus.payload
    else:
      dev_pstatus = None

    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode and not static:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
      # same offline handling as for the primary above
      if dev_sstatus.offline:
        dev_sstatus = None
      else:
        dev_sstatus.Raise("Can't compute disk status for %s" % instance.name)
        dev_sstatus = dev_sstatus.payload
    else:
      dev_sstatus = None

    if dev.children:
      # recurse into child devices, keeping the (possibly updated) snode
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    for instance in self.wanted_instances:
      if not self.op.static:
        # live query: ask the primary node whether the instance runs
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          remote_state = "down"
      else:
        remote_state = None
      if instance.admin_up:
        config_state = "up"
      else:
        config_state = "down"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": _NICListToTuple(self, instance.nics),
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        # hv/be params: both the instance-level overrides and the
        # cluster-filled effective values are returned
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        }

      result[instance.name] = idict

    return result
5902 a8083063 Iustin Pop
5903 a8083063 Iustin Pop
5904 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
5905 a8083063 Iustin Pop
  """Modifies an instances's parameters.
5906 a8083063 Iustin Pop

5907 a8083063 Iustin Pop
  """
5908 a8083063 Iustin Pop
  HPATH = "instance-modify"
5909 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5910 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
5911 1a5c7281 Guido Trotter
  REQ_BGL = False
5912 1a5c7281 Guido Trotter
5913 24991749 Iustin Pop
  def CheckArguments(self):
    """Validate and normalize the opcode arguments in place.

    Fills in missing optional fields on ``self.op``, validates the disk
    and NIC modification lists, and normalizes special string values
    (e.g. "none", "auto") inside the per-item dicts.

    """
    # default all optional fields so the rest of the LU can rely on them
    if not hasattr(self.op, 'nics'):
      self.op.nics = []
    if not hasattr(self.op, 'disks'):
      self.op.disks = []
    if not hasattr(self.op, 'beparams'):
      self.op.beparams = {}
    if not hasattr(self.op, 'hvparams'):
      self.op.hvparams = {}
    self.op.force = getattr(self.op, "force", False)
    if not (self.op.nics or self.op.disks or
            self.op.hvparams or self.op.beparams):
      raise errors.OpPrereqError("No changes submitted")

    # Disk validation
    disk_addremove = 0
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        disk_addremove += 1
        continue
      elif disk_op == constants.DDM_ADD:
        disk_addremove += 1
      else:
        # disk_op is an index into the existing disks: modification
        if not isinstance(disk_op, int):
          raise errors.OpPrereqError("Invalid disk index")
        if not isinstance(disk_dict, dict):
          msg = "Invalid disk value: expected dict, got '%s'" % disk_dict
          raise errors.OpPrereqError(msg)

      if disk_op == constants.DDM_ADD:
        # default the access mode and require/normalize the size
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
        if mode not in constants.DISK_ACCESS_SET:
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
        size = disk_dict.get('size', None)
        if size is None:
          raise errors.OpPrereqError("Required disk parameter size missing")
        try:
          size = int(size)
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
                                     str(err))
        disk_dict['size'] = size
      else:
        # modification of disk
        if 'size' in disk_dict:
          raise errors.OpPrereqError("Disk size change not possible, use"
                                     " grow-disk")

    if disk_addremove > 1:
      raise errors.OpPrereqError("Only one disk add or remove operation"
                                 " supported at a time")

    # NIC validation
    nic_addremove = 0
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        nic_addremove += 1
        continue
      elif nic_op == constants.DDM_ADD:
        nic_addremove += 1
      else:
        # nic_op is an index into the existing NICs: modification
        if not isinstance(nic_op, int):
          raise errors.OpPrereqError("Invalid nic index")
        if not isinstance(nic_dict, dict):
          msg = "Invalid nic value: expected dict, got '%s'" % nic_dict
          raise errors.OpPrereqError(msg)

      # nic_dict should be a dict
      nic_ip = nic_dict.get('ip', None)
      if nic_ip is not None:
        # the literal string "none" clears the IP
        if nic_ip.lower() == constants.VALUE_NONE:
          nic_dict['ip'] = None
        else:
          if not utils.IsValidIP(nic_ip):
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)

      # 'bridge' and 'link' are alternative spellings of the same setting
      nic_bridge = nic_dict.get('bridge', None)
      nic_link = nic_dict.get('link', None)
      if nic_bridge and nic_link:
        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
                                   " at the same time")
      elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE:
        nic_dict['bridge'] = None
      elif nic_link and nic_link.lower() == constants.VALUE_NONE:
        nic_dict['link'] = None

      if nic_op == constants.DDM_ADD:
        # a new NIC without an explicit MAC gets an auto-generated one
        nic_mac = nic_dict.get('mac', None)
        if nic_mac is None:
          nic_dict['mac'] = constants.VALUE_AUTO

      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          if not utils.IsValidMac(nic_mac):
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
        # "auto" only makes sense when adding, not when modifying
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing nic")

    if nic_addremove > 1:
      raise errors.OpPrereqError("Only one NIC add or remove operation"
                                 " supported at a time")
6016 24991749 Iustin Pop
6017 1a5c7281 Guido Trotter
  def ExpandNames(self):
    """Expand names and declare the instance lock."""
    self._ExpandAndLockInstance()
    # node locks depend on the instance's node list, which is only known
    # once the instance lock is held; compute them at declaration time
    node_level = locking.LEVEL_NODE
    self.needed_locks[node_level] = []
    self.recalculate_locks[node_level] = constants.LOCKS_REPLACE
6021 74409b12 Iustin Pop
6022 74409b12 Iustin Pop
  def DeclareLocks(self, level):
    """Acquire the instance's node locks when asked for the node level."""
    if level != locking.LEVEL_NODE:
      return
    self._LockInstancesNodes()
6025 a8083063 Iustin Pop
6026 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = dict()
    # only export the be params that are actually being changed
    if constants.BE_MEMORY in self.be_new:
      args['memory'] = self.be_new[constants.BE_MEMORY]
    if constants.BE_VCPUS in self.be_new:
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.
    if self.op.nics:
      args['nics'] = []
      nic_override = dict(self.op.nics)
      c_nicparams = self.cluster.nicparams[constants.PP_DEFAULT]
      # describe every existing NIC, applying the requested overrides
      for idx, nic in enumerate(self.instance.nics):
        if idx in nic_override:
          this_nic_override = nic_override[idx]
        else:
          this_nic_override = {}
        if 'ip' in this_nic_override:
          ip = this_nic_override['ip']
        else:
          ip = nic.ip
        if 'mac' in this_nic_override:
          mac = this_nic_override['mac']
        else:
          mac = nic.mac
        # nicparams: either the newly-computed ones (nic_pnew) or the
        # current ones filled with the cluster defaults
        if idx in self.nic_pnew:
          nicparams = self.nic_pnew[idx]
        else:
          nicparams = objects.FillDict(c_nicparams, nic.nicparams)
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      if constants.DDM_ADD in nic_override:
        # a NIC is being added: append its description too
        ip = nic_override[constants.DDM_ADD].get('ip', None)
        mac = nic_override[constants.DDM_ADD]['mac']
        nicparams = self.nic_pnew[constants.DDM_ADD]
        mode = nicparams[constants.NIC_MODE]
        link = nicparams[constants.NIC_LINK]
        args['nics'].append((ip, mac, mode, link))
      elif constants.DDM_REMOVE in nic_override:
        # removal always drops the last NIC
        del args['nics'][-1]

    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl
6076 a8083063 Iustin Pop
6077 0329617a Guido Trotter
  def _GetUpdatedParams(self, old_params, update_dict,
                        default_values, parameter_types):
    """Return the new params dict for the given params.

    @type old_params: dict
    @param old_params: old parameters
    @type update_dict: dict
    @param update_dict: dict containing new parameter values,
                        or constants.VALUE_DEFAULT to reset the
                        parameter to its default value
    @type default_values: dict
    @param default_values: default values for the filled parameters
    @type parameter_types: dict
    @param parameter_types: dict mapping target dict keys to types
                            in constants.ENFORCEABLE_TYPES
    @rtype: (dict, dict)
    @return: (new_parameters, filled_parameters)

    """
    new_params = copy.deepcopy(old_params)
    for name, value in update_dict.iteritems():
      if value == constants.VALUE_DEFAULT:
        # a "default" request drops the override so the cluster-level
        # value shows through again; absent keys are ignored
        new_params.pop(name, None)
      else:
        new_params[name] = value
    # type-check/coerce the overrides, then compute the effective values
    utils.ForceDictType(new_params, parameter_types)
    filled_params = objects.FillDict(default_values, new_params)
    return (new_params, filled_params)
6108 0329617a Guido Trotter
6109 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names, and
    validates the requested hvparams/beparams/NIC/disk modifications
    against the cluster defaults and the state of the nodes.

    @raise errors.OpPrereqError: if any of the requested modifications
        is invalid (bad indices, missing memory, bad NIC/disk values)

    """
    self.force = self.op.force

    # checking the new params on the primary/secondary nodes

    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    cluster = self.cluster = self.cfg.GetClusterInfo()
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    pnode = instance.primary_node
    nodelist = list(instance.all_nodes)

    # hvparams processing
    if self.op.hvparams:
      i_hvdict, hv_new = self._GetUpdatedParams(
                             instance.hvparams, self.op.hvparams,
                             cluster.hvparams[instance.hypervisor],
                             constants.HVS_PARAMETER_TYPES)
      # local check
      hypervisor.GetHypervisor(
        instance.hypervisor).CheckParameterSyntax(hv_new)
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
      self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict, be_new = self._GetUpdatedParams(
                             instance.beparams, self.op.beparams,
                             cluster.beparams[constants.PP_DEFAULT],
                             constants.BES_PARAMETER_TYPES)
      self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}

    self.warn = []

    if constants.BE_MEMORY in self.op.beparams and not self.force:
      mem_check_list = [pnode]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
                                                  instance.hypervisor)
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                         instance.hypervisor)
      pninfo = nodeinfo[pnode]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (pnode, msg))
      elif not isinstance(pninfo.payload.get('memory_free', None), int):
        self.warn.append("Node data from primary node %s doesn't contain"
                         " free memory information" % pnode)
      elif instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      else:
        if instance_info.payload:
          current_mem = int(instance_info.payload['memory'])
        else:
          # Assume instance not running
          # (there is a slight race condition here, but it's not very probable,
          # and we have no other way to check)
          current_mem = 0
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
                    pninfo.payload['memory_free'])
        if miss_mem > 0:
          raise errors.OpPrereqError("This change will prevent the instance"
                                     " from starting, due to %d MB of memory"
                                     " missing on its primary node" % miss_mem)

      if be_new[constants.BE_AUTO_BALANCE]:
        # with auto-balance, a failover must fit on every secondary too
        for node, nres in nodeinfo.items():
          if node not in instance.secondary_nodes:
            continue
          msg = nres.fail_msg
          if msg:
            self.warn.append("Can't get info from secondary node %s: %s" %
                             (node, msg))
          elif not isinstance(nres.payload.get('memory_free', None), int):
            self.warn.append("Secondary node %s didn't return free"
                             " memory information" % node)
          elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']:
            self.warn.append("Not enough memory to failover instance to"
                             " secondary node %s" % node)

    # NIC processing
    self.nic_pnew = {}
    self.nic_pinst = {}
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        if not instance.nics:
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
        continue
      if nic_op != constants.DDM_ADD:
        # an existing nic
        if nic_op < 0 or nic_op >= len(instance.nics):
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
                                     " are 0 to %d" %
                                     (nic_op, len(instance.nics)))
        old_nic_params = instance.nics[nic_op].nicparams
        old_nic_ip = instance.nics[nic_op].ip
      else:
        old_nic_params = {}
        old_nic_ip = None

      update_params_dict = dict([(key, nic_dict[key])
                                 for key in constants.NICS_PARAMETERS
                                 if key in nic_dict])

      # 'bridge' is kept as a legacy alias for the link parameter
      if 'bridge' in nic_dict:
        update_params_dict[constants.NIC_LINK] = nic_dict['bridge']

      new_nic_params, new_filled_nic_params = \
          self._GetUpdatedParams(old_nic_params, update_params_dict,
                                 cluster.nicparams[constants.PP_DEFAULT],
                                 constants.NICS_PARAMETER_TYPES)
      objects.NIC.CheckParameterSyntax(new_filled_nic_params)
      self.nic_pinst[nic_op] = new_nic_params
      self.nic_pnew[nic_op] = new_filled_nic_params
      new_nic_mode = new_filled_nic_params[constants.NIC_MODE]

      if new_nic_mode == constants.NIC_MODE_BRIDGED:
        nic_bridge = new_filled_nic_params[constants.NIC_LINK]
        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
        if msg:
          msg = "Error checking bridges on node %s: %s" % (pnode, msg)
          if self.force:
            self.warn.append(msg)
          else:
            raise errors.OpPrereqError(msg)
      if new_nic_mode == constants.NIC_MODE_ROUTED:
        if 'ip' in nic_dict:
          nic_ip = nic_dict['ip']
        else:
          nic_ip = old_nic_ip
        if nic_ip is None:
          raise errors.OpPrereqError('Cannot set the nic ip to None'
                                     ' on a routed nic')
      if 'mac' in nic_dict:
        nic_mac = nic_dict['mac']
        if nic_mac is None:
          raise errors.OpPrereqError('Cannot set the nic mac to None')
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          # otherwise generate the mac
          nic_dict['mac'] = self.cfg.GenerateMAC()
        else:
          # or validate/reserve the current one
          if self.cfg.IsMacInUse(nic_mac):
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % nic_mac)

    # DISK processing
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances")
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        if len(instance.disks) == 1:
          raise errors.OpPrereqError("Cannot remove the last disk of"
                                     " an instance")
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
        ins_l = ins_l[pnode]
        msg = ins_l.fail_msg
        if msg:
          raise errors.OpPrereqError("Can't contact node %s: %s" %
                                     (pnode, msg))
        if instance.name in ins_l.payload:
          raise errors.OpPrereqError("Instance is running, can't remove"
                                     " disks.")

      # FIX: the limit must be checked against the disk count, not the
      # NIC count (was len(instance.nics), which enforced MAX_DISKS on
      # the wrong list)
      if (disk_op == constants.DDM_ADD and
          len(instance.disks) >= constants.MAX_DISKS):
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                   " add more" % constants.MAX_DISKS)
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
        # an existing disk
        if disk_op < 0 or disk_op >= len(instance.disks):
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
                                     " are 0 to %d" %
                                     (disk_op, len(instance.disks)))

    return
6302 a8083063 Iustin Pop
6303 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    @type feedback_fn: callable
    @param feedback_fn: function used to report progress/warnings back
        to the caller
    @rtype: list
    @return: list of (parameter, new value) pairs describing the applied
        modifications

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    result = []
    instance = self.instance
    cluster = self.cluster
    # disk changes
    for disk_op, disk_dict in self.op.disks:
      if disk_op == constants.DDM_REMOVE:
        # remove the last disk
        device = instance.disks.pop()
        device_idx = len(instance.disks)
        for node, disk in device.ComputeNodeTree(instance.primary_node):
          self.cfg.SetDiskID(disk, node)
          # best-effort removal: a failed blockdev_remove only warns
          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
          if msg:
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
                            " continuing anyway", device_idx, node, msg)
        result.append(("disk/%d" % device_idx, "remove"))
      elif disk_op == constants.DDM_ADD:
        # add a new disk
        if instance.disk_template == constants.DT_FILE:
          # file-based disks live in the directory of the first disk
          file_driver, file_path = instance.disks[0].logical_id
          file_path = os.path.dirname(file_path)
        else:
          file_driver = file_path = None
        disk_idx_base = len(instance.disks)
        new_disk = _GenerateDiskTemplate(self,
                                         instance.disk_template,
                                         instance.name, instance.primary_node,
                                         instance.secondary_nodes,
                                         [disk_dict],
                                         file_path,
                                         file_driver,
                                         disk_idx_base)[0]
        instance.disks.append(new_disk)
        info = _GetInstanceInfoText(instance)

        logging.info("Creating volume %s for instance %s",
                     new_disk.iv_name, instance.name)
        # Note: this needs to be kept in sync with _CreateDisks
        #HARDCODE
        for node in instance.all_nodes:
          # only the primary node gets the device created for real
          f_create = node == instance.primary_node
          try:
            _CreateBlockDev(self, node, instance, new_disk,
                            f_create, info, f_create)
          except errors.OpExecError, err:
            # creation failure on one node is reported but not fatal
            self.LogWarning("Failed to create volume %s (%s) on"
                            " node %s: %s",
                            new_disk.iv_name, new_disk, node, err)
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
                       (new_disk.size, new_disk.mode)))
      else:
        # change a given disk
        instance.disks[disk_op].mode = disk_dict['mode']
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
    # NIC changes
    for nic_op, nic_dict in self.op.nics:
      if nic_op == constants.DDM_REMOVE:
        # remove the last nic
        del instance.nics[-1]
        result.append(("nic.%d" % len(instance.nics), "remove"))
      elif nic_op == constants.DDM_ADD:
        # mac and bridge should be set, by now
        mac = nic_dict['mac']
        ip = nic_dict.get('ip', None)
        # nic_pinst/nic_pnew were computed and validated in CheckPrereq
        nicparams = self.nic_pinst[constants.DDM_ADD]
        new_nic = objects.NIC(mac=mac, ip=ip, nicparams=nicparams)
        instance.nics.append(new_nic)
        result.append(("nic.%d" % (len(instance.nics) - 1),
                       "add:mac=%s,ip=%s,mode=%s,link=%s" %
                       (new_nic.mac, new_nic.ip,
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_MODE],
                        self.nic_pnew[constants.DDM_ADD][constants.NIC_LINK]
                       )))
      else:
        # modify an existing nic in place
        for key in 'mac', 'ip':
          if key in nic_dict:
            setattr(instance.nics[nic_op], key, nic_dict[key])
        if nic_op in self.nic_pnew:
          instance.nics[nic_op].nicparams = self.nic_pnew[nic_op]
        for key, val in nic_dict.iteritems():
          result.append(("nic.%s/%d" % (key, nic_op), val))

    # hvparams changes
    if self.op.hvparams:
      instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # persist the modified instance object to the cluster configuration
    self.cfg.Update(instance)

    return result
6411 a8083063 Iustin Pop
6412 a8083063 Iustin Pop
6413 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = ['nodes']
  REQ_BGL = False

  def ExpandNames(self):
    # Read-only query: take shared node locks, on either the requested
    # nodes or the whole node list when none were given.
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      wanted = _GetWantedNodes(self, self.op.nodes)
    else:
      wanted = locking.ALL_SET
    self.needed_locks[locking.LEVEL_NODE] = wanted

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # the node list is simply whatever we ended up locking
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    @rtype: dict
    @return: a dictionary with the structure node->(export-list)
        where export-list is a list of the instances exported on
        that node; nodes whose RPC call failed map to False

    """
    rpcresult = self.rpc.call_export_list(self.nodes)
    result = {}
    for node, nres in rpcresult.items():
      if nres.fail_msg:
        result[node] = False
      else:
        result[node] = nres.payload

    return result
6453 a8083063 Iustin Pop
6454 a8083063 Iustin Pop
6455 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # FIXME: lock only instance primary and destination node
    #
    # Sad but true, for now we have do lock all nodes, as we don't know where
    # the previous export might be, and and in this LU we search for it and
    # remove it from its current node. In the future we could fix this by:
    #  - making a tasklet to search (share-lock all), then create the new one,
    #    then one to remove, after
    #  - removing the removal operation altogether
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def DeclareLocks(self, level):
    """Last minute lock declaration."""
    # All nodes are locked anyway, so nothing to do here.

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and node names are valid.

    @raise errors.OpPrereqError: if the target node name is wrong, or
        the instance uses file-based disks (export not supported)

    """
    instance_name = self.op.instance_name
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    assert self.instance is not None, \
          "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

    self.dst_node = self.cfg.GetNodeInfo(
      self.cfg.ExpandNodeName(self.op.target_node))

    if self.dst_node is None:
      # This is wrong node name, not a non-locked node
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
    _CheckNodeOnline(self, self.dst_node.name)
    _CheckNodeNotDrained(self, self.dst_node.name)

    # instance disk type verification
    for disk in self.instance.disks:
      if disk.dev_type == constants.LD_FILE:
        raise errors.OpPrereqError("Export not supported for instances with"
                                   " file-based disks")

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Sequence: optionally shut the instance down, snapshot each disk,
    restart the instance, copy the snapshots to the target node,
    finalize the export there and finally drop any older export of the
    same instance found on other nodes.

    @raise errors.OpExecError: if the instance cannot be restarted
        after the snapshot phase

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    if self.op.shutdown:
      # shutdown the instance, but not the disks
      result = self.rpc.call_instance_shutdown(src_node, instance)
      result.Raise("Could not shutdown instance %s on"
                   " node %s" % (instance.name, src_node))

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # set the disks ID correctly since call_instance_start needs the
    # correct drbd minor to create the symlinks
    for disk in instance.disks:
      self.cfg.SetDiskID(disk, src_node)

    try:
      # snapshot phase: a failed snapshot is recorded as False so the
      # disk indices stay aligned for the export phase below
      for idx, disk in enumerate(instance.disks):
        # result.payload will be a snapshot of an lvm leaf of the one we passed
        result = self.rpc.call_blockdev_snapshot(src_node, disk)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Could not snapshot disk/%s on node %s: %s",
                          idx, src_node, msg)
          snap_disks.append(False)
        else:
          disk_id = (vgname, result.payload)
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                 logical_id=disk_id, physical_id=disk_id,
                                 iv_name=disk.iv_name)
          snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if it
      # was administratively up before the export
      if self.op.shutdown and instance.admin_up:
        result = self.rpc.call_instance_start(src_node, instance, None, None)
        msg = result.fail_msg
        if msg:
          _ShutdownInstanceDisks(self, instance)
          raise errors.OpExecError("Could not start instance: %s" % msg)

    # TODO: check for size

    # transfer phase: copy each snapshot to the target node and remove
    # the snapshot from the source; failures only warn
    cluster_name = self.cfg.GetClusterName()
    for idx, dev in enumerate(snap_disks):
      if dev:
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                               instance, cluster_name, idx)
        msg = result.fail_msg
        if msg:
          self.LogWarning("Could not export disk/%s from node %s to"
                          " node %s: %s", idx, src_node, dst_node.name, msg)
        msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
        if msg:
          self.LogWarning("Could not remove snapshot for disk/%d from node"
                          " %s: %s", idx, src_node, msg)

    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
    msg = result.fail_msg
    if msg:
      self.LogWarning("Could not finalize export for instance %s"
                      " on node %s: %s", instance.name, dst_node.name, msg)

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    iname = instance.name
    if nodelist:
      # drop any older export of this instance found on other nodes
      exportlist = self.rpc.call_export_list(nodelist)
      for node in exportlist:
        if exportlist[node].fail_msg:
          continue
        if iname in exportlist[node].payload:
          msg = self.rpc.call_export_remove(node, iname).fail_msg
          if msg:
            self.LogWarning("Could not remove older export for instance %s"
                            " on node %s: %s", iname, node, msg)
6607 5c947f38 Iustin Pop
6608 5c947f38 Iustin Pop
6609 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
  """Remove exports related to the named instance.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # Every node must be locked so the export can be removed wherever it
    # lives; the instance itself needs no lock, as nothing happens to it
    # (and exports of already-removed instances can be deleted too).
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}

  def CheckPrereq(self):
    """Check prerequisites.
    """
    pass

  def Exec(self, feedback_fn):
    """Remove any export.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    # When the instance is unknown, fall back to the raw name the user
    # passed in; this will only match exports if that name was an FQDN.
    fqdn_warn = not instance_name
    if fqdn_warn:
      instance_name = self.op.instance_name

    locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
    exportlist = self.rpc.call_export_list(locked_nodes)
    found = False
    for node in exportlist:
      msg = exportlist[node].fail_msg
      if msg:
        # best-effort: keep going over the remaining nodes
        self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
      elif instance_name in exportlist[node].payload:
        found = True
        msg = self.rpc.call_export_remove(node, instance_name).fail_msg
        if msg:
          logging.error("Could not remove export for instance %s"
                        " on node %s: %s", instance_name, node, msg)

    if fqdn_warn and not found:
      feedback_fn("Export not found. If trying to remove an export belonging"
                  " to a deleted instance please use its Fully Qualified"
                  " Domain Name.")
6661 9ac99fda Guido Trotter
6662 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """

  def ExpandNames(self):
    self.needed_locks = {}
    if self.op.kind == constants.TAG_NODE:
      # resolve the node name and lock that node
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_NODE] = expanded
    elif self.op.kind == constants.TAG_INSTANCE:
      # resolve the instance name and lock that instance
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.needed_locks[locking.LEVEL_INSTANCE] = expanded
    # cluster-level tags need no extra locks

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # pick the configuration object whose tags will be manipulated
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      self.target = self.cfg.GetNodeInfo(self.op.name)
    elif kind == constants.TAG_INSTANCE:
      self.target = self.cfg.GetInstanceInfo(self.op.name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
6699 5c947f38 Iustin Pop
6700 5c947f38 Iustin Pop
6701 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq; materialize its tag
    # collection into a plain list for the caller
    tags = self.target.GetTags()
    return list(tags)
6713 5c947f38 Iustin Pop
6714 5c947f38 Iustin Pop
6715 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6716 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6717 73415719 Iustin Pop

6718 73415719 Iustin Pop
  """
6719 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6720 8646adce Guido Trotter
  REQ_BGL = False
6721 8646adce Guido Trotter
6722 8646adce Guido Trotter
  def ExpandNames(self):
6723 8646adce Guido Trotter
    self.needed_locks = {}
6724 73415719 Iustin Pop
6725 73415719 Iustin Pop
  def CheckPrereq(self):
6726 73415719 Iustin Pop
    """Check prerequisites.
6727 73415719 Iustin Pop

6728 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6729 73415719 Iustin Pop

6730 73415719 Iustin Pop
    """
6731 73415719 Iustin Pop
    try:
6732 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6733 73415719 Iustin Pop
    except re.error, err:
6734 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6735 73415719 Iustin Pop
                                 (self.op.pattern, err))
6736 73415719 Iustin Pop
6737 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6738 73415719 Iustin Pop
    """Returns the tag list.
6739 73415719 Iustin Pop

6740 73415719 Iustin Pop
    """
6741 73415719 Iustin Pop
    cfg = self.cfg
6742 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6743 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6744 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6745 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6746 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6747 73415719 Iustin Pop
    results = []
6748 73415719 Iustin Pop
    for path, target in tgts:
6749 73415719 Iustin Pop
      for tag in target.GetTags():
6750 73415719 Iustin Pop
        if self.re.search(tag):
6751 73415719 Iustin Pop
          results.append((path, tag))
6752 73415719 Iustin Pop
    return results
6753 73415719 Iustin Pop
6754 73415719 Iustin Pop
6755 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6756 5c947f38 Iustin Pop
  """Sets a tag on a given object.
6757 5c947f38 Iustin Pop

6758 5c947f38 Iustin Pop
  """
6759 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6760 8646adce Guido Trotter
  REQ_BGL = False
6761 5c947f38 Iustin Pop
6762 5c947f38 Iustin Pop
  def CheckPrereq(self):
6763 5c947f38 Iustin Pop
    """Check prerequisites.
6764 5c947f38 Iustin Pop

6765 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
6766 5c947f38 Iustin Pop

6767 5c947f38 Iustin Pop
    """
6768 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6769 f27302fa Iustin Pop
    for tag in self.op.tags:
6770 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6771 5c947f38 Iustin Pop
6772 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6773 5c947f38 Iustin Pop
    """Sets the tag.
6774 5c947f38 Iustin Pop

6775 5c947f38 Iustin Pop
    """
6776 5c947f38 Iustin Pop
    try:
6777 f27302fa Iustin Pop
      for tag in self.op.tags:
6778 f27302fa Iustin Pop
        self.target.AddTag(tag)
6779 5c947f38 Iustin Pop
    except errors.TagError, err:
6780 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6781 5c947f38 Iustin Pop
    try:
6782 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6783 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6784 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6785 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6786 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6787 5c947f38 Iustin Pop
6788 5c947f38 Iustin Pop
6789 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    # every tag to be deleted must currently be present on the target
    wanted = frozenset(self.op.tags)
    present = self.target.GetTags()
    missing = wanted - present
    if missing:
      missing_names = sorted("'%s'" % tag for tag in missing)
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(missing_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    target = self.target
    for tag in self.op.tags:
      target.RemoveTag(tag)
    # persist the modified object; a concurrent config change aborts the job
    # and asks the caller to retry
    try:
      self.cfg.Update(target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
6826 06009e27 Iustin Pop
6827 0eed6e61 Guido Trotter
6828 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if not self.op.on_nodes:
      return
    # _GetWantedNodes can be used here, but is not always appropriate to use
    # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
    # more information.
    self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
    self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master:
      # sleep locally, on the master
      if not utils.TestDelay(self.op.duration):
        raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      # fan the sleep out to the requested nodes via RPC and check each reply
      result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      for node, node_result in result.items():
        node_result.Raise("Failure during rpc call to node %s" % node)
6868 d61df03e Iustin Pop
6869 d61df03e Iustin Pop
6870 d1c2dd75 Iustin Pop
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # keyword arguments required by __init__ in "allocate" mode
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  # keyword arguments required by __init__ in "relocate" mode
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, lu, mode, name, **kwargs):
    # @param lu: the LogicalUnit on whose behalf we run (provides cfg/rpc)
    # @param mode: one of constants.IALLOCATOR_MODE_{ALLOC,RELOC}
    # @param name: instance name the request is about
    # @param kwargs: exactly the keys of _ALLO_KEYS or _RELO_KEYS,
    #     depending on mode; both missing and extra keys are rejected
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    # strict kwargs validation: reject unknown keys, then require all keys
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    # eagerly build the request text so Run() only has to ship it
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    iinfo = cfg.GetAllInstancesInfo().values()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    # the hypervisor to query node info for: the request's in ALLOC mode,
    # the instance's own in RELOC mode
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      # dynamic (RPC-based) values are only gathered for online nodes;
      # offline nodes keep just the static entry above
      if not ninfo.offline:
        nresult.Raise("Can't get data for node %s" % nname)
        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
                                nname)
        remote_info = nresult.payload
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          if not isinstance(remote_info[attr], int):
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" %
                                     (nname, attr, remote_info[attr]))
        # compute memory used by primary instances
        # NOTE(review): the loop variable below shadows the outer "iinfo"
        # list; harmless here since the list was already consumed into i_list
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].payload:
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory'])
            # charge the node for memory an instance is entitled to but is
            # not currently using (mutates remote_info in place)
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        # overlay per-NIC parameters on top of the cluster defaults
        filled_params = objects.FillDict(
            cluster_info.nicparams[constants.PP_DEFAULT],
            nic.nicparams)
        nic_dict = {"mac": nic.mac,
                    "ip": nic.ip,
                    "mode": filled_params[constants.NIC_MODE],
                    "link": filled_params[constants.NIC_LINK],
                   }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          # legacy field: in bridged mode the link doubles as the bridge name
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _AllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    # network-mirrored templates need a primary and a secondary node
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _IAllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # relocation only makes sense for instances with a secondary node
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    @param name: name of the allocator script to run on the master node
    @param validate: whether to parse/validate the script output into
        self.out_data and the result attributes
    @param call_fn: RPC function override, used by LUTestAllocator to
        short-circuit the actual call

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # the three mandatory result keys become attributes of this object
    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
7168 538475ca Iustin Pop
7169 538475ca Iustin Pop
7170 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  _OP_REQP = ["direction", "mode", "name"]

  def _CheckAllocPrereq(self):
    """Validate the opcode parameters for an allocation test.

    Checks presence of the allocation attributes, that the instance
    name is not already taken, and that the 'nics' and 'disks'
    parameters are well-formed; also defaults the hypervisor if unset.

    """
    for required in ["name", "mem_size", "disks", "disk_template",
                     "os", "tags", "nics", "vcpus"]:
      if not hasattr(self.op, required):
        raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                   required)
    iname = self.cfg.ExpandInstanceName(self.op.name)
    if iname is not None:
      raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                 iname)
    if not isinstance(self.op.nics, list):
      raise errors.OpPrereqError("Invalid parameter 'nics'")
    for nic in self.op.nics:
      # each NIC must be a dict carrying at least mac, ip and bridge
      nic_ok = (isinstance(nic, dict) and
                "mac" in nic and
                "ip" in nic and
                "bridge" in nic)
      if not nic_ok:
        raise errors.OpPrereqError("Invalid contents of the"
                                   " 'nics' parameter")
    if not isinstance(self.op.disks, list):
      raise errors.OpPrereqError("Invalid parameter 'disks'")
    for disk in self.op.disks:
      # each disk must be a dict with an integer size and an r/w mode
      disk_ok = (isinstance(disk, dict) and
                 "size" in disk and
                 isinstance(disk["size"], int) and
                 "mode" in disk and
                 disk["mode"] in ['r', 'w'])
      if not disk_ok:
        raise errors.OpPrereqError("Invalid contents of the"
                                   " 'disks' parameter")
    if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

  def _CheckRelocPrereq(self):
    """Validate the opcode parameters for a relocation test.

    Expands the instance name and remembers its secondary nodes as the
    relocation source.

    """
    if not hasattr(self.op, "name"):
      raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
    fname = self.cfg.ExpandInstanceName(self.op.name)
    if fname is None:
      raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                 self.op.name)
    self.op.name = fname
    self.relocate_from = self.cfg.GetInstanceInfo(fname).secondary_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and
    mode of the test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._CheckAllocPrereq()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      self._CheckRelocPrereq()
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    # "out" direction additionally needs the name of a real allocator
    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      if not hasattr(self.op, "allocator") or self.op.allocator is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)

  def Exec(self, feedback_fn):
    """Run the allocator test.

    For the "in" direction the generated request text is returned; for
    the "out" direction the (unvalidated) allocator output is returned.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       mem_size=self.op.mem_size,
                       disks=self.op.disks,
                       disk_template=self.op.disk_template,
                       os=self.op.os,
                       tags=self.op.tags,
                       nics=self.op.nics,
                       vcpus=self.op.vcpus,
                       hypervisor=self.op.hypervisor,
                       )
    else:
      ial = IAllocator(self,
                       mode=self.op.mode,
                       name=self.op.name,
                       relocate_from=list(self.relocate_from),
                       )

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      return ial.in_text
    ial.Run(self.op.allocator, validate=False)
    return ial.out_text