Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ c70d2d9b

History | View | Annotate | Download (253 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 e7c6e02b Michael Hanselmann
# Copyright (C) 2006, 2007, 2008 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 c70d2d9b Iustin Pop
# pylint: disable-msg=W0201
25 c70d2d9b Iustin Pop
26 c70d2d9b Iustin Pop
# W0201 since most LU attributes are defined in CheckPrereq or similar
27 c70d2d9b Iustin Pop
# functions
28 a8083063 Iustin Pop
29 a8083063 Iustin Pop
import os
30 a8083063 Iustin Pop
import os.path
31 a8083063 Iustin Pop
import time
32 a8083063 Iustin Pop
import re
33 a8083063 Iustin Pop
import platform
34 ffa1c0dc Iustin Pop
import logging
35 74409b12 Iustin Pop
import copy
36 a8083063 Iustin Pop
37 a8083063 Iustin Pop
from ganeti import ssh
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 6048c986 Guido Trotter
from ganeti import locking
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 8d14b30d Iustin Pop
from ganeti import serializer
45 112f18a5 Iustin Pop
from ganeti import ssconf
46 d61df03e Iustin Pop
47 d61df03e Iustin Pop
48 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  # names of opcode attributes which must be present (and not None)
  _OP_REQP = []
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    # maps each lock level to 0/1; a true value means locks at that
    # level are acquired shared rather than exclusively
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    # lazily built by __GetSSH, accessed via the 'ssh' property
    self.__ssh = None
    # logging
    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103

    # fail early if any required opcode parameter is missing
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    self.CheckArguments()

  def __GetSSH(self):
    """Returns the SshRunner object, building it on first use.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckArguments(self):
    """Check syntactic validity for the opcode arguments.

    This method is for doing a simple syntactic check and ensuring
    validity of opcode parameters, without any cluster-related
    checks. While the same can be accomplished in ExpandNames and/or
    CheckPrereq, doing these separately is better because:

      - ExpandNames is left as purely a lock-related function
      - CheckPrereq is run after we have acquired locks (and possibly
        waited for them)

    The function is allowed to change the self.op attribute so that
    later methods need no longer worry about missing parameters.

    """
    pass

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:

      - use an empty dict if you don't need any lock
      - if you don't need any lock at a particular level omit that level
      - don't put anything for the BGL level
      - if you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples::

      # Acquire all nodes and one instance
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: ['instance1.example.tld'],
      }
      # Acquire just two nodes
      self.needed_locks = {
        locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
      }
      # Acquire no locks
      self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    @param feedback_fn: function used to send feedback back to the caller

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    No nodes should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hook_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
    @param lu_result: the previous Exec result this LU had, or None
        in the PRE phase
    @return: the new Exec result, based on the previous result
        and hook results

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to::

      if level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we really have been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]
326 c4a2fee1 Guido Trotter
327 a8083063 Iustin Pop
328 fe267188 Iustin Pop
class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  # With HPATH set to None, BuildHooksEnv is never called for these LUs,
  # so subclasses don't need to implement it (hence the W0223 suppression
  # above for the abstract method left unimplemented).
  HPATH = None
  HTYPE = None
337 a8083063 Iustin Pop
338 a8083063 Iustin Pop
339 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf the expansion is done
  @type nodes: list
  @param nodes: non-empty list of (possibly short) node names
  @rtype: list
  @return: the expanded node names, sorted
  @raise errors.OpPrereqError: if the argument is not a list, or if a
      name does not resolve to a known node
  @raise errors.ProgrammerError: if called with an empty node list

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  expanded = []
  for short_name in nodes:
    full_name = lu.cfg.ExpandNodeName(short_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % short_name)
    expanded.append(full_name)

  return utils.NiceSort(expanded)
366 3312b702 Iustin Pop
367 3312b702 Iustin Pop
368 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
369 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
370 3312b702 Iustin Pop

371 e4376078 Iustin Pop
  @type lu: L{LogicalUnit}
372 e4376078 Iustin Pop
  @param lu: the logical unit on whose behalf we execute
373 e4376078 Iustin Pop
  @type instances: list
374 e4376078 Iustin Pop
  @param instances: list of instance names or None for all instances
375 e4376078 Iustin Pop
  @rtype: list
376 e4376078 Iustin Pop
  @return: the list of instances, sorted
377 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if the instances parameter is wrong type
378 e4376078 Iustin Pop
  @raise errors.OpPrereqError: if any of the passed instances is not found
379 3312b702 Iustin Pop

380 3312b702 Iustin Pop
  """
381 3312b702 Iustin Pop
  if not isinstance(instances, list):
382 3312b702 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'")
383 3312b702 Iustin Pop
384 3312b702 Iustin Pop
  if instances:
385 3312b702 Iustin Pop
    wanted = []
386 3312b702 Iustin Pop
387 3312b702 Iustin Pop
    for name in instances:
388 a7ba5e53 Iustin Pop
      instance = lu.cfg.ExpandInstanceName(name)
389 3312b702 Iustin Pop
      if instance is None:
390 3312b702 Iustin Pop
        raise errors.OpPrereqError("No such instance name '%s'" % name)
391 3312b702 Iustin Pop
      wanted.append(instance)
392 3312b702 Iustin Pop
393 3312b702 Iustin Pop
  else:
394 a7f5dc98 Iustin Pop
    wanted = utils.NiceSort(lu.cfg.GetInstanceList())
395 a7f5dc98 Iustin Pop
  return wanted
396 dcb93971 Michael Hanselmann
397 dcb93971 Michael Hanselmann
398 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  The union of the static and dynamic field sets forms the set of legal
  fields; any selected field outside that union is an error.

  @type static: L{utils.FieldSet}
  @param static: static fields set
  @type dynamic: L{utils.FieldSet}
  @param dynamic: dynamic fields set
  @raise errors.OpPrereqError: if any selected field is unknown

  """
  all_fields = utils.FieldSet()
  all_fields.Extend(static)
  all_fields.Extend(dynamic)

  unknown = all_fields.NonMatching(selected)
  if unknown:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown))
415 dcb93971 Michael Hanselmann
416 dcb93971 Michael Hanselmann
417 a5961235 Iustin Pop
def _CheckBooleanOpField(op, name):
418 a5961235 Iustin Pop
  """Validates boolean opcode parameters.
419 a5961235 Iustin Pop

420 a5961235 Iustin Pop
  This will ensure that an opcode parameter is either a boolean value,
421 a5961235 Iustin Pop
  or None (but that it always exists).
422 a5961235 Iustin Pop

423 a5961235 Iustin Pop
  """
424 a5961235 Iustin Pop
  val = getattr(op, name, None)
425 a5961235 Iustin Pop
  if not (val is None or isinstance(val, bool)):
426 a5961235 Iustin Pop
    raise errors.OpPrereqError("Invalid boolean parameter '%s' (%s)" %
427 a5961235 Iustin Pop
                               (name, str(val)))
428 a5961235 Iustin Pop
  setattr(op, name, val)
429 a5961235 Iustin Pop
430 a5961235 Iustin Pop
431 a5961235 Iustin Pop
def _CheckNodeOnline(lu, node):
432 a5961235 Iustin Pop
  """Ensure that a given node is online.
433 a5961235 Iustin Pop

434 a5961235 Iustin Pop
  @param lu: the LU on behalf of which we make the check
435 a5961235 Iustin Pop
  @param node: the node to check
436 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is offline
437 a5961235 Iustin Pop

438 a5961235 Iustin Pop
  """
439 a5961235 Iustin Pop
  if lu.cfg.GetNodeInfo(node).offline:
440 a5961235 Iustin Pop
    raise errors.OpPrereqError("Can't use offline node %s" % node)
441 a5961235 Iustin Pop
442 a5961235 Iustin Pop
443 733a2b6a Iustin Pop
def _CheckNodeNotDrained(lu, node):
444 733a2b6a Iustin Pop
  """Ensure that a given node is not drained.
445 733a2b6a Iustin Pop

446 733a2b6a Iustin Pop
  @param lu: the LU on behalf of which we make the check
447 733a2b6a Iustin Pop
  @param node: the node to check
448 733a2b6a Iustin Pop
  @raise errors.OpPrereqError: if the node is drained
449 733a2b6a Iustin Pop

450 733a2b6a Iustin Pop
  """
451 733a2b6a Iustin Pop
  if lu.cfg.GetNodeInfo(node).drained:
452 733a2b6a Iustin Pop
    raise errors.OpPrereqError("Can't use drained node %s" % node)
453 733a2b6a Iustin Pop
454 733a2b6a Iustin Pop
455 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
456 67fc3042 Iustin Pop
                          memory, vcpus, nics, disk_template, disks,
457 7c4d6c7b Michael Hanselmann
                          bep, hvp, hypervisor_name):
458 e4376078 Iustin Pop
  """Builds instance related env variables for hooks
459 e4376078 Iustin Pop

460 e4376078 Iustin Pop
  This builds the hook environment from individual variables.
461 e4376078 Iustin Pop

462 e4376078 Iustin Pop
  @type name: string
463 e4376078 Iustin Pop
  @param name: the name of the instance
464 e4376078 Iustin Pop
  @type primary_node: string
465 e4376078 Iustin Pop
  @param primary_node: the name of the instance's primary node
466 e4376078 Iustin Pop
  @type secondary_nodes: list
467 e4376078 Iustin Pop
  @param secondary_nodes: list of secondary nodes as strings
468 e4376078 Iustin Pop
  @type os_type: string
469 e4376078 Iustin Pop
  @param os_type: the name of the instance's OS
470 0d68c45d Iustin Pop
  @type status: boolean
471 0d68c45d Iustin Pop
  @param status: the should_run status of the instance
472 e4376078 Iustin Pop
  @type memory: string
473 e4376078 Iustin Pop
  @param memory: the memory size of the instance
474 e4376078 Iustin Pop
  @type vcpus: string
475 e4376078 Iustin Pop
  @param vcpus: the count of VCPUs the instance has
476 e4376078 Iustin Pop
  @type nics: list
477 e4376078 Iustin Pop
  @param nics: list of tuples (ip, bridge, mac) representing
478 e4376078 Iustin Pop
      the NICs the instance  has
479 2c2690c9 Iustin Pop
  @type disk_template: string
480 5bbd3f7f Michael Hanselmann
  @param disk_template: the disk template of the instance
481 2c2690c9 Iustin Pop
  @type disks: list
482 2c2690c9 Iustin Pop
  @param disks: the list of (size, mode) pairs
483 67fc3042 Iustin Pop
  @type bep: dict
484 67fc3042 Iustin Pop
  @param bep: the backend parameters for the instance
485 67fc3042 Iustin Pop
  @type hvp: dict
486 67fc3042 Iustin Pop
  @param hvp: the hypervisor parameters for the instance
487 7c4d6c7b Michael Hanselmann
  @type hypervisor_name: string
488 7c4d6c7b Michael Hanselmann
  @param hypervisor_name: the hypervisor for the instance
489 e4376078 Iustin Pop
  @rtype: dict
490 e4376078 Iustin Pop
  @return: the hook environment for this instance
491 ecb215b5 Michael Hanselmann

492 396e1b78 Michael Hanselmann
  """
493 0d68c45d Iustin Pop
  if status:
494 0d68c45d Iustin Pop
    str_status = "up"
495 0d68c45d Iustin Pop
  else:
496 0d68c45d Iustin Pop
    str_status = "down"
497 396e1b78 Michael Hanselmann
  env = {
498 0e137c28 Iustin Pop
    "OP_TARGET": name,
499 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
500 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
501 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
502 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
503 0d68c45d Iustin Pop
    "INSTANCE_STATUS": str_status,
504 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
505 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
506 2c2690c9 Iustin Pop
    "INSTANCE_DISK_TEMPLATE": disk_template,
507 7c4d6c7b Michael Hanselmann
    "INSTANCE_HYPERVISOR": hypervisor_name,
508 396e1b78 Michael Hanselmann
  }
509 396e1b78 Michael Hanselmann
510 396e1b78 Michael Hanselmann
  if nics:
511 396e1b78 Michael Hanselmann
    nic_count = len(nics)
512 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
513 396e1b78 Michael Hanselmann
      if ip is None:
514 396e1b78 Michael Hanselmann
        ip = ""
515 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
516 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
517 2c2690c9 Iustin Pop
      env["INSTANCE_NIC%d_MAC" % idx] = mac
518 396e1b78 Michael Hanselmann
  else:
519 396e1b78 Michael Hanselmann
    nic_count = 0
520 396e1b78 Michael Hanselmann
521 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
522 396e1b78 Michael Hanselmann
523 2c2690c9 Iustin Pop
  if disks:
524 2c2690c9 Iustin Pop
    disk_count = len(disks)
525 2c2690c9 Iustin Pop
    for idx, (size, mode) in enumerate(disks):
526 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_SIZE" % idx] = size
527 2c2690c9 Iustin Pop
      env["INSTANCE_DISK%d_MODE" % idx] = mode
528 2c2690c9 Iustin Pop
  else:
529 2c2690c9 Iustin Pop
    disk_count = 0
530 2c2690c9 Iustin Pop
531 2c2690c9 Iustin Pop
  env["INSTANCE_DISK_COUNT"] = disk_count
532 2c2690c9 Iustin Pop
533 67fc3042 Iustin Pop
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
534 67fc3042 Iustin Pop
    for key, value in source.items():
535 67fc3042 Iustin Pop
      env["INSTANCE_%s_%s" % (kind, key)] = value
536 67fc3042 Iustin Pop
537 396e1b78 Michael Hanselmann
  return env
538 396e1b78 Michael Hanselmann
539 396e1b78 Michael Hanselmann
540 338e51e8 Iustin Pop
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Build the hook environment dictionary from an instance object.

  Extracts the relevant fields from the instance (and its filled
  backend/hypervisor parameters) and delegates the actual environment
  construction to L{_BuildInstanceHookEnv}.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster_info = lu.cfg.GetClusterInfo()
  # parameters filled with the cluster-level defaults
  filled_be = cluster_info.FillBE(instance)
  filled_hv = cluster_info.FillHV(instance)
  nic_list = [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics]
  disk_list = [(disk.size, disk.mode) for disk in instance.disks]
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.admin_up,
    'memory': filled_be[constants.BE_MEMORY],
    'vcpus': filled_be[constants.BE_VCPUS],
    'nics': nic_list,
    'disk_template': instance.disk_template,
    'disks': disk_list,
    'bep': filled_be,
    'hvp': filled_hv,
    'hypervisor_name': instance.hypervisor,
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
576 396e1b78 Michael Hanselmann
577 396e1b78 Michael Hanselmann
578 ec0292f1 Iustin Pop
def _AdjustCandidatePool(lu):
579 ec0292f1 Iustin Pop
  """Adjust the candidate pool after node operations.
580 ec0292f1 Iustin Pop

581 ec0292f1 Iustin Pop
  """
582 ec0292f1 Iustin Pop
  mod_list = lu.cfg.MaintainCandidatePool()
583 ec0292f1 Iustin Pop
  if mod_list:
584 ec0292f1 Iustin Pop
    lu.LogInfo("Promoted nodes to master candidate role: %s",
585 ee513a66 Iustin Pop
               ", ".join(node.name for node in mod_list))
586 ec0292f1 Iustin Pop
    for name in mod_list:
587 ec0292f1 Iustin Pop
      lu.context.ReaddNode(name)
588 ec0292f1 Iustin Pop
  mc_now, mc_max = lu.cfg.GetMasterCandidateStats()
589 ec0292f1 Iustin Pop
  if mc_now > mc_max:
590 ec0292f1 Iustin Pop
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
591 ec0292f1 Iustin Pop
               (mc_now, mc_max))
592 ec0292f1 Iustin Pop
593 ec0292f1 Iustin Pop
594 b9bddb6b Iustin Pop
def _CheckInstanceBridgesExist(lu, instance):
  """Verify that all bridges needed by an instance's NICs exist.

  Queries the instance's primary node via RPC; raises
  errors.OpPrereqError if any bridge is reported missing.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose bridges we check

  """
  bridges = [nic.bridge for nic in instance.nics]
  pnode = instance.primary_node
  result = lu.rpc.call_bridges_exist(pnode, bridges)
  result.Raise()
  if not result.data:
    raise errors.OpPrereqError("One or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (bridges, pnode))
606 bf6929a2 Alexander Schreiber
607 bf6929a2 Alexander Schreiber
608 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that no nodes other than the master and no instances
    remain in the configuration.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    node_names = self.cfg.GetNodeList()
    if len(node_names) != 1 or node_names[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(node_names) - 1))
    instance_names = self.cfg.GetInstanceList()
    if instance_names:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instance_names))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role on the master node, backs up the Ganeti
    run-as user's SSH key files and returns the master node name.

    """
    master = self.cfg.GetMasterNode()
    result = self.rpc.call_node_stop_master(master, False)
    result.Raise()
    if not result.data:
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    # keep copies of the key files before the cluster goes away
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master
646 a8083063 Iustin Pop
647 a8083063 Iustin Pop
648 d8fff41c Guido Trotter
class LUVerifyCluster(LogicalUnit):
649 a8083063 Iustin Pop
  """Verifies the cluster status.
650 a8083063 Iustin Pop

651 a8083063 Iustin Pop
  """
652 d8fff41c Guido Trotter
  HPATH = "cluster-verify"
653 d8fff41c Guido Trotter
  HTYPE = constants.HTYPE_CLUSTER
654 e54c4c5e Guido Trotter
  _OP_REQP = ["skip_checks"]
655 d4b9d97f Guido Trotter
  REQ_BGL = False
656 d4b9d97f Guido Trotter
657 d4b9d97f Guido Trotter
  def ExpandNames(self):
    """Declare the locks needed: all nodes and instances, shared.

    """
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    # every locking level is acquired in shared mode
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
663 a8083063 Iustin Pop
664 25361b9a Iustin Pop
  def _VerifyNode(self, nodeinfo, file_list, local_cksum,
                  node_result, feedback_fn, master_files,
                  drbd_map, vg_name):
    """Run multiple tests against a node.

    Test list:

      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    @type nodeinfo: L{objects.Node}
    @param nodeinfo: the node to check
    @param file_list: required list of files
    @param local_cksum: dictionary of local files and their checksums
    @param node_result: the results from the node
    @param feedback_fn: function used to accumulate results
    @param master_files: list of files that only masters should have
    @param drbd_map: the used drbd minors for this node, in
        form of minor: (instance, must_exist) which correspond to instances
        and their running status
    @param vg_name: Ganeti Volume Group (result of self.cfg.GetVGName())
    @rtype: boolean
    @return: True if at least one error was found, False otherwise

    """
    node = nodeinfo.name

    # main result, node_result should be a non-empty dict
    if not node_result or not isinstance(node_result, dict):
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    remote_version = node_result.get('version', None)
    # the version entry must be a (protocol, release) pair
    if not (remote_version and isinstance(remote_version, (list, tuple)) and
            len(remote_version) == 2):
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    # protocol mismatch makes further result parsing unreliable, so bail out
    if local_version != remote_version[0]:
      feedback_fn("  - ERROR: incompatible protocol versions: master %s,"
                  " node %s %s" % (local_version, node, remote_version[0]))
      return True

    # node seems compatible, we can actually try to look into its results

    bad = False

    # full package version; a mismatch is only a warning, not an error
    if constants.RELEASE_VERSION != remote_version[1]:
      feedback_fn("  - WARNING: software version mismatch: master %s,"
                  " node %s %s" %
                  (constants.RELEASE_VERSION, node, remote_version[1]))

    # checks vg existence and size > 20G (skipped when no VG is configured)
    if vg_name is not None:
      vglist = node_result.get(constants.NV_VGLIST, None)
      if not vglist:
        feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                        (node,))
        bad = True
      else:
        vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
          bad = True

    # checks config file checksum

    remote_cksum = node_result.get(constants.NV_FILELIST, None)
    if not isinstance(remote_cksum, dict):
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      for file_name in file_list:
        node_is_mc = nodeinfo.master_candidate
        # files not in master_files must exist on every node
        must_have_file = file_name not in master_files
        if file_name not in remote_cksum:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          if node_is_mc or must_have_file:
            bad = True
            feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
          else:
            # not candidate and this is not a must-have file
            bad = True
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates (and the file is outdated)" % file_name)
        else:
          # all good, except non-master/non-must have combination
          if not node_is_mc and not must_have_file:
            feedback_fn("  - ERROR: file '%s' should not exist on non master"
                        " candidates" % file_name)

    # checks ssh to any

    if constants.NV_NODELIST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result[constants.NV_NODELIST]:
        bad = True
        # NOTE(review): this loop rebinds 'node' (the node name) to the
        # peer names from the result; intentional-looking but fragile
        for node in node_result[constants.NV_NODELIST]:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODELIST][node]))

    if constants.NV_NODENETTEST not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result[constants.NV_NODENETTEST]:
        bad = True
        nlist = utils.NiceSort(node_result[constants.NV_NODENETTEST].keys())
        # 'node' is rebound here as well, see the ssh loop above
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result[constants.NV_NODENETTEST][node]))

    # per-hypervisor verification results; only failures are reported
    hyp_result = node_result.get(constants.NV_HYPERVISOR, None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))

    # check used drbd list
    if vg_name is not None:
      used_minors = node_result.get(constants.NV_DRBDLIST, [])
      if not isinstance(used_minors, (tuple, list)):
        feedback_fn("  - ERROR: cannot parse drbd status file: %s" %
                    str(used_minors))
      else:
        # cross-check the configured map against the in-use minors, both ways
        for minor, (iname, must_exist) in drbd_map.items():
          if minor not in used_minors and must_exist:
            feedback_fn("  - ERROR: drbd minor %d of instance %s is"
                        " not active" % (minor, iname))
            bad = True
        for minor in used_minors:
          if minor not in drbd_map:
            feedback_fn("  - ERROR: unallocated drbd minor %d is in use" %
                        minor)
            bad = True

    return bad
811 a8083063 Iustin Pop
812 c5705f58 Guido Trotter
  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn, n_offline):
    """Verify a single instance.

    Checks that the logical volumes the instance needs are present on
    the correct nodes, that the instance runs on its primary node when
    marked as up, and that it does not run anywhere else.

    """
    bad = False
    pnode = instanceconfig.primary_node

    # expected LVs, grouped by the node that should hold them
    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for nname in node_vol_should:
      if nname in n_offline:
        # missing volumes on offline nodes are not reported
        continue
      for volume in node_vol_should[nname]:
        present = nname in node_vol_is and volume in node_vol_is[nname]
        if not present:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, nname))
          bad = True

    if instanceconfig.admin_up:
      running_on_pnode = (pnode in node_instance and
                          instance in node_instance[pnode])
      if not running_on_pnode and pnode not in n_offline:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, pnode))
        bad = True

    for nname in node_instance:
      if nname != pnode and instance in node_instance[nname]:
        feedback_fn("  - ERROR: instance %s should not run on node %s" %
                        (instance, nname))
        bad = True

    return bad
853 a8083063 Iustin Pop
854 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    Every volume found on a node but not present in the expected map
    (node_vol_should) is reported as an error.

    """
    bad = False

    for nname in node_vol_is:
      # volumes we expect on this node; empty when the node is unknown
      expected = node_vol_should.get(nname, {})
      for volume in node_vol_is[nname]:
        if volume not in expected:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, nname))
          bad = True
    return bad
870 a8083063 Iustin Pop
871 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    Reports every instance that is running on some node but is not
    known to the cluster configuration.

    """
    bad = False
    for nname in node_instance:
      for iname in node_instance[nname]:
        if iname not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (iname, nname))
          bad = True
    return bad
885 a8083063 Iustin Pop
886 2b3b6ddd Guido Trotter
  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    @param node_info: per-node dict holding at least 'mfree' and
        'sinst-by-pnode' entries
    @param instance_cfg: mapping of instance name to instance object
    @param feedback_fn: function used to accumulate results
    @rtype: boolean
    @return: True if some node cannot absorb the failovers of a peer

    """
    bad = False
    # the cluster object is loop-invariant, so fetch it only once
    # instead of once per instance
    cluster = self.cfg.GetClusterInfo()

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = cluster.FillBE(instance_cfg[instance])
          # only auto-balanced instances count towards the N+1 requirement
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad
915 2b3b6ddd Guido Trotter
916 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Turn the list of checks to skip into a frozenset and make sure
    every requested skip is one of the known optional checks.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    # every skipped check must be an optional verification check
    if not self.skip_set <= constants.VERIFY_OPTIONAL_CHECKS:
      raise errors.OpPrereqError("Invalid checks to be skipped specified")
926 a8083063 Iustin Pop
927 d8fff41c Guido Trotter
  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just ran in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    env = {
      "CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags())
      }
    # export the tags of every node as well
    for node in self.cfg.GetAllNodesInfo().values():
      env["NODE_TAGS_%s" % node.name] = " ".join(node.GetTags())

    all_nodes = self.cfg.GetNodeList()
    # no pre-hook nodes; the post hooks run on every node
    return env, [], all_nodes
942 d8fff41c Guido Trotter
943 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
944 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
945 a8083063 Iustin Pop

946 a8083063 Iustin Pop
    """
947 a8083063 Iustin Pop
    bad = False
948 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
949 8522ceeb Iustin Pop
    for msg in self.cfg.VerifyConfig():
950 8522ceeb Iustin Pop
      feedback_fn("  - ERROR: %s" % msg)
951 a8083063 Iustin Pop
952 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
953 e69d05fd Iustin Pop
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
954 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
955 9d4bfc96 Iustin Pop
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
956 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
957 6d2e83d5 Iustin Pop
    instanceinfo = dict((iname, self.cfg.GetInstanceInfo(iname))
958 6d2e83d5 Iustin Pop
                        for iname in instancelist)
959 93e4c50b Guido Trotter
    i_non_redundant = [] # Non redundant instances
960 3924700f Iustin Pop
    i_non_a_balanced = [] # Non auto-balanced instances
961 0a66c968 Iustin Pop
    n_offline = [] # List of offline nodes
962 22f0f71d Iustin Pop
    n_drained = [] # List of nodes being drained
963 a8083063 Iustin Pop
    node_volume = {}
964 a8083063 Iustin Pop
    node_instance = {}
965 9c9c7d30 Guido Trotter
    node_info = {}
966 26b6af5e Guido Trotter
    instance_cfg = {}
967 a8083063 Iustin Pop
968 a8083063 Iustin Pop
    # FIXME: verify OS list
969 a8083063 Iustin Pop
    # do local checksums
970 112f18a5 Iustin Pop
    master_files = [constants.CLUSTER_CONF_FILE]
971 112f18a5 Iustin Pop
972 112f18a5 Iustin Pop
    file_names = ssconf.SimpleStore().GetFileList()
973 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
974 699777f2 Michael Hanselmann
    file_names.append(constants.RAPI_CERT_FILE)
975 112f18a5 Iustin Pop
    file_names.extend(master_files)
976 112f18a5 Iustin Pop
977 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
978 a8083063 Iustin Pop
979 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
980 a8083063 Iustin Pop
    node_verify_param = {
981 25361b9a Iustin Pop
      constants.NV_FILELIST: file_names,
982 82e37788 Iustin Pop
      constants.NV_NODELIST: [node.name for node in nodeinfo
983 82e37788 Iustin Pop
                              if not node.offline],
984 25361b9a Iustin Pop
      constants.NV_HYPERVISOR: hypervisors,
985 25361b9a Iustin Pop
      constants.NV_NODENETTEST: [(node.name, node.primary_ip,
986 82e37788 Iustin Pop
                                  node.secondary_ip) for node in nodeinfo
987 82e37788 Iustin Pop
                                 if not node.offline],
988 25361b9a Iustin Pop
      constants.NV_INSTANCELIST: hypervisors,
989 25361b9a Iustin Pop
      constants.NV_VERSION: None,
990 25361b9a Iustin Pop
      constants.NV_HVINFO: self.cfg.GetHypervisorType(),
991 a8083063 Iustin Pop
      }
992 cc9e1230 Guido Trotter
    if vg_name is not None:
993 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_VGLIST] = None
994 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_LVLIST] = vg_name
995 cc9e1230 Guido Trotter
      node_verify_param[constants.NV_DRBDLIST] = None
996 72737a7f Iustin Pop
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
997 72737a7f Iustin Pop
                                           self.cfg.GetClusterName())
998 a8083063 Iustin Pop
999 3924700f Iustin Pop
    cluster = self.cfg.GetClusterInfo()
1000 112f18a5 Iustin Pop
    master_node = self.cfg.GetMasterNode()
1001 6d2e83d5 Iustin Pop
    all_drbd_map = self.cfg.ComputeDRBDMap()
1002 6d2e83d5 Iustin Pop
1003 112f18a5 Iustin Pop
    for node_i in nodeinfo:
1004 112f18a5 Iustin Pop
      node = node_i.name
1005 25361b9a Iustin Pop
      nresult = all_nvinfo[node].data
1006 25361b9a Iustin Pop
1007 0a66c968 Iustin Pop
      if node_i.offline:
1008 0a66c968 Iustin Pop
        feedback_fn("* Skipping offline node %s" % (node,))
1009 0a66c968 Iustin Pop
        n_offline.append(node)
1010 0a66c968 Iustin Pop
        continue
1011 0a66c968 Iustin Pop
1012 112f18a5 Iustin Pop
      if node == master_node:
1013 25361b9a Iustin Pop
        ntype = "master"
1014 112f18a5 Iustin Pop
      elif node_i.master_candidate:
1015 25361b9a Iustin Pop
        ntype = "master candidate"
1016 22f0f71d Iustin Pop
      elif node_i.drained:
1017 22f0f71d Iustin Pop
        ntype = "drained"
1018 22f0f71d Iustin Pop
        n_drained.append(node)
1019 112f18a5 Iustin Pop
      else:
1020 25361b9a Iustin Pop
        ntype = "regular"
1021 112f18a5 Iustin Pop
      feedback_fn("* Verifying node %s (%s)" % (node, ntype))
1022 25361b9a Iustin Pop
1023 25361b9a Iustin Pop
      if all_nvinfo[node].failed or not isinstance(nresult, dict):
1024 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
1025 25361b9a Iustin Pop
        bad = True
1026 25361b9a Iustin Pop
        continue
1027 25361b9a Iustin Pop
1028 6d2e83d5 Iustin Pop
      node_drbd = {}
1029 6d2e83d5 Iustin Pop
      for minor, instance in all_drbd_map[node].items():
1030 c614e5fb Iustin Pop
        if instance not in instanceinfo:
1031 c614e5fb Iustin Pop
          feedback_fn("  - ERROR: ghost instance '%s' in temporary DRBD map" %
1032 c614e5fb Iustin Pop
                      instance)
1033 c614e5fb Iustin Pop
          # ghost instance should not be running, but otherwise we
1034 c614e5fb Iustin Pop
          # don't give double warnings (both ghost instance and
1035 c614e5fb Iustin Pop
          # unallocated minor in use)
1036 c614e5fb Iustin Pop
          node_drbd[minor] = (instance, False)
1037 c614e5fb Iustin Pop
        else:
1038 c614e5fb Iustin Pop
          instance = instanceinfo[instance]
1039 c614e5fb Iustin Pop
          node_drbd[minor] = (instance.name, instance.admin_up)
1040 112f18a5 Iustin Pop
      result = self._VerifyNode(node_i, file_names, local_checksums,
1041 6d2e83d5 Iustin Pop
                                nresult, feedback_fn, master_files,
1042 cc9e1230 Guido Trotter
                                node_drbd, vg_name)
1043 a8083063 Iustin Pop
      bad = bad or result
1044 a8083063 Iustin Pop
1045 25361b9a Iustin Pop
      lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
1046 cc9e1230 Guido Trotter
      if vg_name is None:
1047 cc9e1230 Guido Trotter
        node_volume[node] = {}
1048 cc9e1230 Guido Trotter
      elif isinstance(lvdata, basestring):
1049 b63ed789 Iustin Pop
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
1050 26f15862 Iustin Pop
                    (node, utils.SafeEncode(lvdata)))
1051 b63ed789 Iustin Pop
        bad = True
1052 b63ed789 Iustin Pop
        node_volume[node] = {}
1053 25361b9a Iustin Pop
      elif not isinstance(lvdata, dict):
1054 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (lvlist)" % (node,))
1055 a8083063 Iustin Pop
        bad = True
1056 a8083063 Iustin Pop
        continue
1057 b63ed789 Iustin Pop
      else:
1058 25361b9a Iustin Pop
        node_volume[node] = lvdata
1059 a8083063 Iustin Pop
1060 a8083063 Iustin Pop
      # node_instance
1061 25361b9a Iustin Pop
      idata = nresult.get(constants.NV_INSTANCELIST, None)
1062 25361b9a Iustin Pop
      if not isinstance(idata, list):
1063 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (instancelist)" %
1064 25361b9a Iustin Pop
                    (node,))
1065 a8083063 Iustin Pop
        bad = True
1066 a8083063 Iustin Pop
        continue
1067 a8083063 Iustin Pop
1068 25361b9a Iustin Pop
      node_instance[node] = idata
1069 a8083063 Iustin Pop
1070 9c9c7d30 Guido Trotter
      # node_info
1071 25361b9a Iustin Pop
      nodeinfo = nresult.get(constants.NV_HVINFO, None)
1072 9c9c7d30 Guido Trotter
      if not isinstance(nodeinfo, dict):
1073 25361b9a Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed (hvinfo)" % (node,))
1074 9c9c7d30 Guido Trotter
        bad = True
1075 9c9c7d30 Guido Trotter
        continue
1076 9c9c7d30 Guido Trotter
1077 9c9c7d30 Guido Trotter
      try:
1078 9c9c7d30 Guido Trotter
        node_info[node] = {
1079 9c9c7d30 Guido Trotter
          "mfree": int(nodeinfo['memory_free']),
1080 93e4c50b Guido Trotter
          "pinst": [],
1081 93e4c50b Guido Trotter
          "sinst": [],
1082 36e7da50 Guido Trotter
          # dictionary holding all instances this node is secondary for,
1083 36e7da50 Guido Trotter
          # grouped by their primary node. Each key is a cluster node, and each
1084 36e7da50 Guido Trotter
          # value is a list of instances which have the key as primary and the
1085 36e7da50 Guido Trotter
          # current node as secondary.  this is handy to calculate N+1 memory
1086 36e7da50 Guido Trotter
          # availability if you can only failover from a primary to its
1087 36e7da50 Guido Trotter
          # secondary.
1088 36e7da50 Guido Trotter
          "sinst-by-pnode": {},
1089 9c9c7d30 Guido Trotter
        }
1090 cc9e1230 Guido Trotter
        # FIXME: devise a free space model for file based instances as well
1091 cc9e1230 Guido Trotter
        if vg_name is not None:
1092 9a198532 Iustin Pop
          if (constants.NV_VGLIST not in nresult or
1093 9a198532 Iustin Pop
              vg_name not in nresult[constants.NV_VGLIST]):
1094 9a198532 Iustin Pop
            feedback_fn("  - ERROR: node %s didn't return data for the"
1095 9a198532 Iustin Pop
                        " volume group '%s' - it is either missing or broken" %
1096 9a198532 Iustin Pop
                        (node, vg_name))
1097 9a198532 Iustin Pop
            bad = True
1098 9a198532 Iustin Pop
            continue
1099 cc9e1230 Guido Trotter
          node_info[node]["dfree"] = int(nresult[constants.NV_VGLIST][vg_name])
1100 9a198532 Iustin Pop
      except (ValueError, KeyError):
1101 9a198532 Iustin Pop
        feedback_fn("  - ERROR: invalid nodeinfo value returned"
1102 9a198532 Iustin Pop
                    " from node %s" % (node,))
1103 9c9c7d30 Guido Trotter
        bad = True
1104 9c9c7d30 Guido Trotter
        continue
1105 9c9c7d30 Guido Trotter
1106 a8083063 Iustin Pop
    node_vol_should = {}
1107 a8083063 Iustin Pop
1108 a8083063 Iustin Pop
    for instance in instancelist:
1109 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
1110 6d2e83d5 Iustin Pop
      inst_config = instanceinfo[instance]
1111 c5705f58 Guido Trotter
      result =  self._VerifyInstance(instance, inst_config, node_volume,
1112 0a66c968 Iustin Pop
                                     node_instance, feedback_fn, n_offline)
1113 c5705f58 Guido Trotter
      bad = bad or result
1114 832261fd Iustin Pop
      inst_nodes_offline = []
1115 a8083063 Iustin Pop
1116 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
1117 a8083063 Iustin Pop
1118 26b6af5e Guido Trotter
      instance_cfg[instance] = inst_config
1119 26b6af5e Guido Trotter
1120 93e4c50b Guido Trotter
      pnode = inst_config.primary_node
1121 93e4c50b Guido Trotter
      if pnode in node_info:
1122 93e4c50b Guido Trotter
        node_info[pnode]['pinst'].append(instance)
1123 0a66c968 Iustin Pop
      elif pnode not in n_offline:
1124 93e4c50b Guido Trotter
        feedback_fn("  - ERROR: instance %s, connection to primary node"
1125 93e4c50b Guido Trotter
                    " %s failed" % (instance, pnode))
1126 93e4c50b Guido Trotter
        bad = True
1127 93e4c50b Guido Trotter
1128 832261fd Iustin Pop
      if pnode in n_offline:
1129 832261fd Iustin Pop
        inst_nodes_offline.append(pnode)
1130 832261fd Iustin Pop
1131 93e4c50b Guido Trotter
      # If the instance is non-redundant we cannot survive losing its primary
1132 93e4c50b Guido Trotter
      # node, so we are not N+1 compliant. On the other hand we have no disk
1133 93e4c50b Guido Trotter
      # templates with more than one secondary so that situation is not well
1134 93e4c50b Guido Trotter
      # supported either.
1135 93e4c50b Guido Trotter
      # FIXME: does not support file-backed instances
1136 93e4c50b Guido Trotter
      if len(inst_config.secondary_nodes) == 0:
1137 93e4c50b Guido Trotter
        i_non_redundant.append(instance)
1138 93e4c50b Guido Trotter
      elif len(inst_config.secondary_nodes) > 1:
1139 93e4c50b Guido Trotter
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
1140 93e4c50b Guido Trotter
                    % instance)
1141 93e4c50b Guido Trotter
1142 c0f2b229 Iustin Pop
      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
1143 3924700f Iustin Pop
        i_non_a_balanced.append(instance)
1144 3924700f Iustin Pop
1145 93e4c50b Guido Trotter
      for snode in inst_config.secondary_nodes:
1146 93e4c50b Guido Trotter
        if snode in node_info:
1147 93e4c50b Guido Trotter
          node_info[snode]['sinst'].append(instance)
1148 36e7da50 Guido Trotter
          if pnode not in node_info[snode]['sinst-by-pnode']:
1149 36e7da50 Guido Trotter
            node_info[snode]['sinst-by-pnode'][pnode] = []
1150 36e7da50 Guido Trotter
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
1151 0a66c968 Iustin Pop
        elif snode not in n_offline:
1152 93e4c50b Guido Trotter
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
1153 93e4c50b Guido Trotter
                      " %s failed" % (instance, snode))
1154 832261fd Iustin Pop
          bad = True
1155 832261fd Iustin Pop
        if snode in n_offline:
1156 832261fd Iustin Pop
          inst_nodes_offline.append(snode)
1157 832261fd Iustin Pop
1158 832261fd Iustin Pop
      if inst_nodes_offline:
1159 832261fd Iustin Pop
        # warn that the instance lives on offline nodes, and set bad=True
1160 832261fd Iustin Pop
        feedback_fn("  - ERROR: instance lives on offline node(s) %s" %
1161 832261fd Iustin Pop
                    ", ".join(inst_nodes_offline))
1162 832261fd Iustin Pop
        bad = True
1163 93e4c50b Guido Trotter
1164 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
1165 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
1166 a8083063 Iustin Pop
                                       feedback_fn)
1167 a8083063 Iustin Pop
    bad = bad or result
1168 a8083063 Iustin Pop
1169 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
1170 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
1171 a8083063 Iustin Pop
                                         feedback_fn)
1172 a8083063 Iustin Pop
    bad = bad or result
1173 a8083063 Iustin Pop
1174 e54c4c5e Guido Trotter
    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
1175 e54c4c5e Guido Trotter
      feedback_fn("* Verifying N+1 Memory redundancy")
1176 e54c4c5e Guido Trotter
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
1177 e54c4c5e Guido Trotter
      bad = bad or result
1178 2b3b6ddd Guido Trotter
1179 2b3b6ddd Guido Trotter
    feedback_fn("* Other Notes")
1180 2b3b6ddd Guido Trotter
    if i_non_redundant:
1181 2b3b6ddd Guido Trotter
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
1182 2b3b6ddd Guido Trotter
                  % len(i_non_redundant))
1183 2b3b6ddd Guido Trotter
1184 3924700f Iustin Pop
    if i_non_a_balanced:
1185 3924700f Iustin Pop
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
1186 3924700f Iustin Pop
                  % len(i_non_a_balanced))
1187 3924700f Iustin Pop
1188 0a66c968 Iustin Pop
    if n_offline:
1189 0a66c968 Iustin Pop
      feedback_fn("  - NOTICE: %d offline node(s) found." % len(n_offline))
1190 0a66c968 Iustin Pop
1191 22f0f71d Iustin Pop
    if n_drained:
1192 22f0f71d Iustin Pop
      feedback_fn("  - NOTICE: %d drained node(s) found." % len(n_drained))
1193 22f0f71d Iustin Pop
1194 34290825 Michael Hanselmann
    return not bad
1195 a8083063 Iustin Pop
1196 d8fff41c Guido Trotter
  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result

    This method analyses the hook result, handles it, and sends some
    nicely-formatted feedback back to the user.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used send feedback back to the caller
    @param lu_result: previous Exec result
    @return: the new Exec result, based on the previous result
        and hook results

    """
    # We only really run POST phase hooks, and are only interested in their
    # results; for any other phase we (implicitly) return None, exactly as
    # the historical control flow did
    if phase != constants.HOOKS_PHASE_POST:
      return None

    feedback_fn("* Hooks Results")
    if not hooks_results:
      feedback_fn("  - ERROR: general communication failure")
      return 1

    # regexp used to shift the hooks' output to the proper indentation
    indent_re = re.compile('^', re.M)
    for node_name in hooks_results:
      res = hooks_results[node_name]
      data_ok = (not res.failed and res.data is not False and
                 isinstance(res.data, list))
      if not data_ok:
        # offline nodes cause neither a warning nor a bad return value
        if not res.offline:
          feedback_fn("    Communication failure in hooks execution")
          lu_result = 1
        continue
      header_pending = True
      for script, hkr, output in res.data:
        if hkr != constants.HKR_FAIL:
          continue
        # the node header is emitted only once, and only if at least
        # one hook on that node has failed
        if header_pending:
          feedback_fn("  Node %s:" % node_name)
          header_pending = False
        feedback_fn("    ERROR: Script %s failed, output:" % script)
        feedback_fn("%s" % indent_re.sub('      ', output))
        lu_result = 1

    return lu_result
1244 d8fff41c Guido Trotter
1245 a8083063 Iustin Pop
1246 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @return: a 4-tuple of (unreachable nodes, per-node LVM error
        messages, instances with offline volumes, per-instance lists
        of missing volumes); the individual containers are bound to
        locals as well as aliased in the returned tuple

    """
    res_nodes = []
    res_nlvm = {}
    res_instances = []
    res_missing = {}
    result = (res_nodes, res_nlvm, res_instances, res_missing)

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]

    # map each (node, volume) pair to the instance owning the volume,
    # considering only running, network-mirrored instances
    nv_dict = {}
    for inst in instances:
      if (not inst.admin_up or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst_lvs = {}
      inst.MapLVsByNode(inst_lvs)
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      node_res = node_lvs[node]
      if node_res.failed:
        if not node_res.offline:
          self.LogWarning("Connection to node %s failed: %s" %
                          (node, node_res.data))
        continue
      lv_data = node_res.data
      if isinstance(lv_data, basestring):
        # LVM-level failure, remember the message for the caller
        logging.warning("Error enumerating LVs on node %s: %s", node, lv_data)
        res_nlvm[node] = lv_data
        continue
      if not isinstance(lv_data, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, _, lv_online) in lv_data.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (inst is not None and not lv_online and
            inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      res_missing.setdefault(inst.name, []).append(key)

    return result
1329 2c95a8d4 Iustin Pop
1330 2c95a8d4 Iustin Pop
1331 60975797 Iustin Pop
class LURepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  _OP_REQP = ["instances"]
  REQ_BGL = False

  def ExpandNames(self):
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")

    if not self.op.instances:
      # no explicit instance list: operate on everything
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }
    else:
      self.wanted_names = []
      for name in self.op.instances:
        full_name = self.cfg.ExpandInstanceName(name)
        if full_name is None:
          raise errors.OpPrereqError("Instance '%s' not known" % name)
        self.wanted_names.append(full_name)
      self.needed_locks = {
        locking.LEVEL_NODE: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]

    self.wanted_instances = [self.cfg.GetInstanceInfo(iname)
                             for iname in self.wanted_names]

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type != constants.LD_DRBD8:
      return False

    assert disk.children, "Empty children for DRBD8?"
    fchild = disk.children[0]
    mismatch = fchild.size < disk.size
    if mismatch:
      self.LogInfo("Child disk has size %d, parent %d, fixing",
                   fchild.size, disk.size)
      fchild.size = disk.size

    # we recurse on the data child only, not on the metadev
    return self._EnsureChildSizes(fchild) or mismatch

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      node_disk_list = per_node_disks.setdefault(instance.primary_node, [])
      for idx, disk in enumerate(instance.disks):
        node_disk_list.append((instance, idx, disk))

    changed = []
    for node, disk_items in per_node_disks.items():
      # work on copies, as SetDiskID modifies the objects
      newl = [item[2].Copy() for item in disk_items]
      for dsk in newl:
        self.cfg.SetDiskID(dsk, node)
      result = self.rpc.call_blockdev_getsizes(node, newl)
      if result.failed:
        self.LogWarning("Failure in blockdev_getsizes call to node"
                        " %s, ignoring", node)
        continue
      if len(result.data) != len(disk_items):
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node)
        continue
      for ((instance, idx, disk), size) in zip(disk_items, result.data):
        if size is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(size, (int, long)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
        # the RPC reports bytes, the configuration stores mebibytes
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(instance)
          changed.append((instance.name, idx, size))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(instance)
          changed.append((instance.name, idx, disk.size))
    return changed
1451 60975797 Iustin Pop
1452 60975797 Iustin Pop
1453 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    master = self.cfg.GetMasterNode()
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    return env, [master], [master]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if (new_ip != old_ip and
        utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT)):
      raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                 " reachable on the network. Aborting." %
                                 new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    new_name = self.op.name
    new_ip = self.ip
    master = self.cfg.GetMasterNode()

    # shutdown the master IP
    result = self.rpc.call_node_stop_master(master, False)
    if result.failed or not result.data:
      raise errors.OpExecError("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = new_name
      cluster.master_ip = new_ip
      self.cfg.Update(cluster)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, constants.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetNodeList()
      if master in node_list:
        node_list.remove(master)
      result = self.rpc.call_upload_file(node_list,
                                         constants.SSH_KNOWN_HOSTS_FILE)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          logging.error("Copy of file %s to node %s failed",
                        constants.SSH_KNOWN_HOSTS_FILE, to_node)

    finally:
      # always try to bring the master role back up
      result = self.rpc.call_node_start_master(master, False, False)
      if result.failed or not result.data:
        self.LogWarning("Could not re-enable the master role on"
                        " the master, please restart manually.")
1531 07bd8a51 Iustin Pop
1532 07bd8a51 Iustin Pop
1533 8084f9f6 Manuel Franceschini
def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  @type disk: L{objects.Disk}
  @param disk: the disk to check
  @rtype: boolean
  @return: boolean indicating whether a LD_LV dev_type was found or not

  """
  # a positive answer anywhere in the child tree wins
  for chdisk in disk.children or []:
    if _RecursiveCheckIfLVMBased(chdisk):
      return True
  return disk.dev_type == constants.LD_LV
1547 8084f9f6 Manuel Franceschini
1548 8084f9f6 Manuel Franceschini
1549 8084f9f6 Manuel Franceschini
class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  This LU can modify the volume group name (including disabling LVM
  storage altogether), the list of enabled hypervisors, the default
  hypervisor and backend parameters and the master candidate pool size.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  # all opcode parameters are optional, hence the empty required list
  _OP_REQP = []
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    Normalizes the optional candidate_pool_size opcode attribute and
    verifies that, when given, it is a strictly positive integer.

    """
    if not hasattr(self.op, "candidate_pool_size"):
      self.op.candidate_pool_size = None
    if self.op.candidate_pool_size is not None:
      try:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)
      except (ValueError, TypeError), err:
        raise errors.OpPrereqError("Invalid candidate_pool_size value: %s" %
                                   str(err))
      if self.op.candidate_pool_size < 1:
        raise errors.OpPrereqError("At least one master candidate needed")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    # shared lock is enough: we only read node data during the checks
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run only on the master node.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # a vg_name that is not None but evaluates to false (empty string)
    # means "disable lvm storage"; this is only allowed when no
    # instance currently uses lvm-based disks
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        if vglist[node].failed:
          # ignoring down node
          self.LogWarning("Node %s unreachable/error, ignoring" % node)
          continue
        vgstatus = utils.CheckVolumeGroupSize(vglist[node].data,
                                              self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # validate beparams changes
    if self.op.beparams:
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      # merge the per-hypervisor overrides into the (copied) cluster defaults
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
      if not self.hv_list:
        raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                                   " least one member")
      invalid_hvs = set(self.hv_list) - constants.HYPER_TYPES
      if invalid_hvs:
        raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                                   " entries: %s" % invalid_hvs)
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        # an empty vg_name means "disable lvm storage", stored as None
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size
      # we need to update the pool size here, otherwise the save will fail
      _AdjustCandidatePool(self)

    self.cfg.Update(self.cluster)
1692 8084f9f6 Manuel Franceschini
1693 afee0879 Iustin Pop
class LURedistributeConfig(NoHooksLU):
  """Force a push of the cluster configuration to all nodes.

  A trivial LU: re-saving the current cluster object triggers the
  normal configuration distribution machinery.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # shared lock on all nodes: we only redistribute, so concurrent
    # read-only LUs may keep running
    self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
    self.share_locks[locking.LEVEL_NODE] = 1

  def CheckPrereq(self):
    """No prerequisites to check.

    """

  def Exec(self, feedback_fn):
    """Re-save (and thereby redistribute) the cluster configuration.

    """
    cluster = self.cfg.GetClusterInfo()
    self.cfg.Update(cluster)
1718 afee0879 Iustin Pop
1719 afee0879 Iustin Pop
1720 b9bddb6b Iustin Pop
def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Polls the primary node for the mirror status of all the instance's
  disks and, unless oneshot is given, loops until they are fully in
  sync (or the node stays unreachable for too many retries).

  @param lu: the logical unit on whose behalf we poll (its cfg, rpc
      and proc members are used)
  @param instance: the instance whose disks we wait for
  @type oneshot: boolean
  @param oneshot: if True, poll the status only once instead of
      waiting for the synchronisation to finish
  @type unlock: boolean
  @param unlock: unused by this function (kept for caller
      compatibility)
  @rtype: boolean
  @return: True if no disk was left degraded, False otherwise

  """
  if not instance.disks:
    # nothing to sync
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if rstats.failed or not rstats.data:
      # RPC-level failure: retry a limited number of times before
      # giving up on the node completely
      lu.LogWarning("Can't get any data from node %s", node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.data
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                           node, instance.disks[i].iv_name)
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # a degraded device with no sync in progress marks the overall
      # status as degraded
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    # sleep proportionally to the longest estimate, capped at a minute
    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1787 a8083063 Iustin Pop
1788 a8083063 Iustin Pop
1789 b9bddb6b Iustin Pop
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
1790 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1791 a8083063 Iustin Pop

1792 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1793 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1794 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1795 0834c866 Iustin Pop

1796 a8083063 Iustin Pop
  """
1797 b9bddb6b Iustin Pop
  lu.cfg.SetDiskID(dev, node)
1798 0834c866 Iustin Pop
  if ldisk:
1799 0834c866 Iustin Pop
    idx = 6
1800 0834c866 Iustin Pop
  else:
1801 0834c866 Iustin Pop
    idx = 5
1802 a8083063 Iustin Pop
1803 a8083063 Iustin Pop
  result = True
1804 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1805 72737a7f Iustin Pop
    rstats = lu.rpc.call_blockdev_find(node, dev)
1806 23829f6f Iustin Pop
    msg = rstats.RemoteFailMsg()
1807 23829f6f Iustin Pop
    if msg:
1808 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
1809 23829f6f Iustin Pop
      result = False
1810 23829f6f Iustin Pop
    elif not rstats.payload:
1811 23829f6f Iustin Pop
      lu.LogWarning("Can't find disk on node %s", node)
1812 a8083063 Iustin Pop
      result = False
1813 a8083063 Iustin Pop
    else:
1814 23829f6f Iustin Pop
      result = result and (not rstats.payload[idx])
1815 a8083063 Iustin Pop
  if dev.children:
1816 a8083063 Iustin Pop
    for child in dev.children:
1817 b9bddb6b Iustin Pop
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
1818 a8083063 Iustin Pop
1819 a8083063 Iustin Pop
  return result
1820 a8083063 Iustin Pop
1821 a8083063 Iustin Pop
1822 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False
  # no static fields: everything is computed from the per-node OS data
  _FIELDS_STATIC = utils.FieldSet()
  _FIELDS_DYNAMIC = utils.FieldSet("name", "valid", "node_status")

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    self.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    Nothing to check here; field validation is done in ExpandNames.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into a per-os per-node dictionary

    @param node_list: a list with the names of all nodes
    @param rlist: a map with node names as keys and OS objects as values

    @rtype: dict
    @return: a dictionary with osnames as keys and as value another map, with
        nodes as keys and list of OS objects as values, eg::

          {"debian-etch": {"node1": [<object>,...],
                           "node2": [<object>,]}
          }

    """
    all_os = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_name for node_name in rlist
                  if not rlist[node_name].failed]
    for node_name, nr in rlist.iteritems():
      if nr.failed or not nr.data:
        # skip nodes with no (valid) OS answer
        continue
      for os_obj in nr.data:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in good_nodes:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    # only query nodes marked online in the configuration
    valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
    node_data = self.rpc.call_os_diagnose(valid_nodes)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(valid_nodes, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          # an OS is valid only if it is valid on every node reporting it
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1913 a8083063 Iustin Pop
1914 a8083063 Iustin Pop
1915 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # the node being removed must not run the hooks
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # fixed: this used the deprecated "raise Class, arg" comma syntax,
      # inconsistent with every other raise in this file
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name in instance.all_nodes:
        raise errors.OpPrereqError("Instance %s is still running on the node,"
                                   " please remove first." % instance_name)
    # store the canonical (expanded) name and the node object for Exec
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logging.info("Stopping the node daemon and removing configs from node %s",
                 node.name)

    self.context.RemoveNode(node.name)

    self.rpc.call_node_leave_cluster(node.name)

    # Promote nodes to master candidate as needed
    _AdjustCandidatePool(self)
1982 eb1742d5 Guido Trotter
1983 a8083063 Iustin Pop
1984 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # fields that need live data gathered from the nodes via RPC
  _FIELDS_DYNAMIC = utils.FieldSet(
    "dtotal", "dfree",
    "mtotal", "mnode", "mfree",
    "bootid",
    "ctotal", "cnodes", "csockets",
    )

  # fields answerable from the configuration alone
  _FIELDS_STATIC = utils.FieldSet(
    "name", "pinst_cnt", "sinst_cnt",
    "pinst_list", "sinst_list",
    "pip", "sip", "tags",
    "serial_no",
    "master_candidate",
    "master",
    "offline",
    "drained",
    "role",
    )

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in the _GetWantedNodes,
    # if non empty, and if empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      # with locking, the node list is whatever we managed to lock
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.wanted != locking.ALL_SET:
      # explicit node list without locking: verify it is still known
      nodenames = self.wanted
      missing = set(nodenames).difference(all_info.keys())
      if missing:
        raise errors.OpExecError(
          "Some nodes were removed before retrieving their data: %s" % missing)
    else:
      nodenames = all_info.keys()

    nodenames = utils.NiceSort(nodenames)
    nodelist = [all_info[name] for name in nodenames]

    # begin data gathering

    if self.do_node_query:
      # gather live data via RPC; unreachable nodes get an empty dict
      # so their dynamic fields degrade to None below
      live_data = {}
      node_data = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                          self.cfg.GetHypervisorType())
      for name in nodenames:
        nodeinfo = node_data[name]
        if not nodeinfo.failed and nodeinfo.data:
          nodeinfo = nodeinfo.data
          fn = utils.TryConvert
          live_data[name] = {
            "mtotal": fn(int, nodeinfo.get('memory_total', None)),
            "mnode": fn(int, nodeinfo.get('memory_dom0', None)),
            "mfree": fn(int, nodeinfo.get('memory_free', None)),
            "dtotal": fn(int, nodeinfo.get('vg_size', None)),
            "dfree": fn(int, nodeinfo.get('vg_free', None)),
            "ctotal": fn(int, nodeinfo.get('cpu_total', None)),
            "bootid": nodeinfo.get('bootid', None),
            "cnodes": fn(int, nodeinfo.get('cpu_nodes', None)),
            "csockets": fn(int, nodeinfo.get('cpu_sockets', None)),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    # map each queried node to the sets of instances it hosts
    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      # instance data is only needed if an instance field was requested
      inst_data = self.cfg.GetAllInstancesInfo()

      for instance_name, inst in inst_data.items():
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    master_node = self.cfg.GetMasterNode()

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field == "serial_no":
          val = node.serial_no
        elif field == "master_candidate":
          val = node.master_candidate
        elif field == "master":
          val = node.name == master_node
        elif field == "offline":
          val = node.offline
        elif field == "drained":
          val = node.drained
        elif self._FIELDS_DYNAMIC.Matches(field):
          val = live_data[node.name].get(field, None)
        elif field == "role":
          # one-letter role: Master, Candidate, Drained, Offline, Regular
          if node.name == master_node:
            val = "M"
          elif node.master_candidate:
            val = "C"
          elif node.drained:
            val = "D"
          elif node.offline:
            val = "O"
          else:
            val = "R"
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
2151 a8083063 Iustin Pop
2152 a8083063 Iustin Pop
2153 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet("phys", "vg", "name", "size", "instance")
  _FIELDS_STATIC = utils.FieldSet("node")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    # volumes are read-only data, so shared node locks are enough
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    if self.op.nodes:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = self.rpc.call_node_volumes(nodenames)

    # all instance objects plus their LV-by-node maps, used below to
    # attribute a logical volume to the instance owning it
    ilist = [self.cfg.GetInstanceInfo(iname)
             for iname in self.cfg.GetInstanceList()]
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      nresult = volumes.get(node, None)
      if nresult is None or nresult.failed or not nresult.data:
        # node did not answer or returned no usable volume data
        continue

      for vol in sorted(nresult.data, key=lambda vol: vol['dev']):
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV on this node, if any
            val = '-'
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
2232 dcb93971 Michael Hanselmann
2233 dcb93971 Michael Hanselmann
2234 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  Also handles re-adding an existing node (op.readd), in which case the
  node's IP configuration must match what is already recorded.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    # post-hooks additionally run on the node being added
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signaled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; raises on resolver failure
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    # the secondary IP defaults to the primary (single-homed node)
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip

    node_list = cfg.GetNodeList()
    if not self.op.readd and node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration" %
                                 node)
    elif self.op.readd and node not in node_list:
      raise errors.OpPrereqError("Node %s is not in the configuration" % node)

    # no other node may already use either of the new node's addresses;
    # for a re-add, the node's own entry must match exactly
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)

      if self.op.readd and node == existing_node_name:
        if (existing_node.primary_ip != primary_ip or
            existing_node.secondary_ip != secondary_ip):
          raise errors.OpPrereqError("Readded node doesn't have the same IP"
                                     " address configuration as before")
        continue

      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.cfg.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # decide whether the new node can be a master candidate: when
    # re-adding, the node's own (old) entry is excluded from the stats
    cp_size = self.cfg.GetClusterInfo().candidate_pool_size
    if self.op.readd:
      exceptions = [node]
    else:
      exceptions = []
    mc_now, mc_max = self.cfg.GetMasterCandidateStats(exceptions)
    # the new node will increase mc_max with one, so:
    mc_max = min(mc_max + 1, cp_size)
    self.master_candidate = mc_now < mc_max

    if self.op.readd:
      self.new_node = self.cfg.GetNodeInfo(node)
      assert self.new_node is not None, "Can't retrieve locked node %s" % node
    else:
      self.new_node = objects.Node(name=node,
                                   primary_ip=primary_ip,
                                   secondary_ip=secondary_ip,
                                   master_candidate=self.master_candidate,
                                   offline=False, drained=False)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    Verifies connectivity and software version, transfers the ssh host
    and user keys, distributes /etc/hosts and known_hosts, and finally
    registers the node in the cluster context.

    """
    new_node = self.new_node
    node = new_node.name

    # for re-adds, reset the offline/drained/master-candidate flags;
    # we need to reset here, otherwise offline would prevent RPC calls
    # later in the procedure; this also means that if the re-add
    # fails, we are left with a non-offlined, broken node
    if self.op.readd:
      new_node.drained = new_node.offline = False
      self.LogInfo("Readding a node, the offline/drained flags were reset")
      # if we demote the node, we do cleanup later in the procedure
      new_node.master_candidate = self.master_candidate

    # notify the user about any possible mc promotion
    if new_node.master_candidate:
      self.LogInfo("Node will be a master candidate")

    # check connectivity
    result = self.rpc.call_version([node])[node]
    result.Raise()
    if result.data:
      if constants.PROTOCOL_VERSION == result.data:
        logging.info("Communication to node %s fine, sw version %s match",
                     node, result.data)
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result.data))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logging.info("Copy ssh key to node %s", node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # keyfiles order must match the call_node_add positional arguments
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                    keyarray[2],
                                    keyarray[3], keyarray[4], keyarray[5])

    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot transfer ssh keys to the"
                               " new node: %s" % msg)

    # Add node to our /etc/hosts, and add key to known_hosts
    if self.cfg.GetClusterInfo().modify_etc_hosts:
      utils.AddHostToEtcHosts(new_node.name)

    if new_node.secondary_ip != new_node.primary_ip:
      # dual-homed node: verify it actually owns the secondary IP
      result = self.rpc.call_node_has_ip_address(new_node.name,
                                                 new_node.secondary_ip)
      if result.failed or not result.data:
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # run a 'node verify' against the new node, from the master only
    node_verify_list = [self.cfg.GetMasterNode()]
    node_verify_param = {
      'nodelist': [node],
      # TODO: do a node-net-test as well?
    }

    result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                       self.cfg.GetClusterName())
    for verifier in node_verify_list:
      if result[verifier].failed or not result[verifier].data:
        raise errors.OpExecError("Cannot communicate with %s's node daemon"
                                 " for remote verification" % verifier)
      if result[verifier].data['nodelist']:
        for failed in result[verifier].data['nodelist']:
          feedback_fn("ssh/hostname verification failed"
                      " (checking from %s): %s" %
                      (verifier, result[verifier].data['nodelist'][failed]))
        raise errors.OpExecError("ssh/hostname verification failed.")

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList()
    if not self.op.readd:
      dist_nodes.append(node)
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logging.debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = self.rpc.call_upload_file(dist_nodes, fname)
      for to_node, to_result in result.iteritems():
        if to_result.failed or not to_result.data:
          # best-effort distribution: log, but do not abort the add
          logging.error("Copy of file %s to node %s failed", fname, to_node)

    to_copy = []
    enabled_hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    if constants.HTS_COPY_VNC_PASSWORD.intersection(enabled_hypervisors):
      to_copy.append(constants.VNC_PASSWORD_FILE)

    for fname in to_copy:
      result = self.rpc.call_upload_file([node], fname)
      # NOTE(review): other checks in this method test result[node].data;
      # "not result[node]" here looks inconsistent -- confirm RpcResult
      # truthiness before relying on this branch
      if result[node].failed or not result[node]:
        logging.error("Could not copy file %s to node %s", fname, node)

    if self.op.readd:
      self.context.ReaddNode(new_node)
      # make sure we redistribute the config
      self.cfg.Update(new_node)
      # and make sure the new node will not have old files around
      if not new_node.master_candidate:
        result = self.rpc.call_node_demote_from_mc(new_node.name)
        msg = result.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself from master"
                          " candidate status: %s" % msg)
    else:
      self.context.AddNode(new_node)
2480 a8083063 Iustin Pop
2481 a8083063 Iustin Pop
2482 b31c8676 Iustin Pop
class LUSetNodeParams(LogicalUnit):
  """Modifies the parameters of a node.

  Supported flags are master_candidate, offline and drained; at most one
  of them may be set to True in a single invocation.

  """
  HPATH = "node-modify"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

  def CheckArguments(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name)
    self.op.node_name = node_name
    # normalize the three optional flags to True/False/None
    _CheckBooleanOpField(self.op, 'master_candidate')
    _CheckBooleanOpField(self.op, 'offline')
    _CheckBooleanOpField(self.op, 'drained')
    all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
    # all three None means the opcode requests no change at all
    if all_mods.count(None) == 3:
      raise errors.OpPrereqError("Please pass at least one modification")
    if all_mods.count(True) > 1:
      raise errors.OpPrereqError("Can't set the node into more than one"
                                 " state at the same time")

  def ExpandNames(self):
    self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "MASTER_CANDIDATE": str(self.op.master_candidate),
      "OFFLINE": str(self.op.offline),
      "DRAINED": str(self.op.drained),
      }
    nl = [self.cfg.GetMasterNode(),
          self.op.node_name]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the requested flag changes are valid: the master
    node's role cannot be changed here, demoting a master candidate must
    not shrink the candidate pool below its configured size (unless
    forced), and an offline/drained node cannot be promoted.

    """
    node = self.node = self.cfg.GetNodeInfo(self.op.node_name)

    if (self.op.master_candidate is not None or
        self.op.drained is not None or
        self.op.offline is not None):
      # we can't change the master's node flags
      if self.op.node_name == self.cfg.GetMasterNode():
        raise errors.OpPrereqError("The master role can be changed"
                                   " only via masterfailover")

    # any of these changes demotes a current master candidate
    if ((self.op.master_candidate == False or self.op.offline == True or
         self.op.drained == True) and node.master_candidate):
      cp_size = self.cfg.GetClusterInfo().candidate_pool_size
      num_candidates, _ = self.cfg.GetMasterCandidateStats()
      if num_candidates <= cp_size:
        msg = ("Not enough master candidates (desired"
               " %d, new value will be %d)" % (cp_size, num_candidates-1))
        # NOTE(review): self.op.force is read here but is not in _OP_REQP;
        # presumably guaranteed by the opcode definition -- confirm
        if self.op.force:
          self.LogWarning(msg)
        else:
          raise errors.OpPrereqError(msg)

    # promotion is refused while the node is (and stays) offline/drained
    if (self.op.master_candidate == True and
        ((node.offline and not self.op.offline == False) or
         (node.drained and not self.op.drained == False))):
      raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                                 " to master_candidate" % node.name)

    return

  def Exec(self, feedback_fn):
    """Modifies a node.

    Returns a list of (parameter, new value) pairs describing the
    changes that were applied.

    """
    node = self.node

    result = []
    changed_mc = False

    if self.op.offline is not None:
      node.offline = self.op.offline
      result.append(("offline", str(self.op.offline)))
      if self.op.offline == True:
        # going offline implies losing candidate and drained status
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to offline"))
        if node.drained:
          node.drained = False
          result.append(("drained", "clear drained status due to offline"))

    if self.op.master_candidate is not None:
      node.master_candidate = self.op.master_candidate
      changed_mc = True
      result.append(("master_candidate", str(self.op.master_candidate)))
      if self.op.master_candidate == False:
        # ask the node to remove its master-candidate-only files;
        # failure is only a warning, the config change stands
        rrc = self.rpc.call_node_demote_from_mc(node.name)
        msg = rrc.RemoteFailMsg()
        if msg:
          self.LogWarning("Node failed to demote itself: %s" % msg)

    if self.op.drained is not None:
      node.drained = self.op.drained
      result.append(("drained", str(self.op.drained)))
      if self.op.drained == True:
        # draining implies losing candidate and offline status
        if node.master_candidate:
          node.master_candidate = False
          changed_mc = True
          result.append(("master_candidate", "auto-demotion due to drain"))
          rrc = self.rpc.call_node_demote_from_mc(node.name)
          msg = rrc.RemoteFailMsg()
          if msg:
            self.LogWarning("Node failed to demote itself: %s" % msg)
        if node.offline:
          node.offline = False
          result.append(("offline", "clear offline status due to drain"))

    # this will trigger configuration file update, if needed
    self.cfg.Update(node)
    # this will trigger job queue propagation or cleanup
    if changed_mc:
      self.context.ReaddNode(node)

    return result
2615 b31c8676 Iustin Pop
2616 b31c8676 Iustin Pop
2617 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # read-only query, no locks required
    self.needed_locks = {}

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()

    # restrict hvparams to the enabled hypervisors only
    hv_params = dict([(hv_name, cluster.hvparams[hv_name])
                      for hv_name in cluster.enabled_hypervisors])

    result = {}
    # software/protocol identification
    result["software_version"] = constants.RELEASE_VERSION
    result["protocol_version"] = constants.PROTOCOL_VERSION
    result["config_version"] = constants.CONFIG_VERSION
    result["os_api_version"] = constants.OS_API_VERSION
    result["export_version"] = constants.EXPORT_VERSION
    result["architecture"] = (platform.architecture()[0], platform.machine())
    # cluster identity and configuration
    result["name"] = cluster.cluster_name
    result["master"] = cluster.master_node
    result["default_hypervisor"] = cluster.default_hypervisor
    result["enabled_hypervisors"] = cluster.enabled_hypervisors
    result["hvparams"] = hv_params
    result["beparams"] = cluster.beparams
    result["candidate_pool_size"] = cluster.candidate_pool_size
    result["default_bridge"] = cluster.default_bridge
    result["master_netdev"] = cluster.master_netdev
    result["volume_group_name"] = cluster.volume_group_name
    result["file_storage_dir"] = cluster.file_storage_dir
    result["tags"] = list(cluster.GetTags())

    return result
2661 a8083063 Iustin Pop
2662 a8083063 Iustin Pop
2663 ae5849b5 Michael Hanselmann
class LUQueryConfigValues(NoHooksLU):
  """Return configuration values.

  Lightweight query LU: answers for a small set of static fields
  (cluster name, master node, queue drain flag) without taking any
  locks.

  """
  _OP_REQP = []
  REQ_BGL = False
  _FIELDS_DYNAMIC = utils.FieldSet()
  _FIELDS_STATIC = utils.FieldSet("cluster_name", "master_node", "drain_flag")

  def ExpandNames(self):
    # no locks needed; just validate the requested output fields early
    self.needed_locks = {}

    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    @return: list of values, one per requested output field, in the
        order the fields were requested

    """
    values = []
    for field in self.op.output_fields:
      if field == "cluster_name":
        entry = self.cfg.GetClusterName()
      elif field == "master_node":
        entry = self.cfg.GetMasterNode()
      elif field == "drain_flag":
        # the queue is drained iff the drain marker file exists
        entry = os.path.exists(constants.JOB_QUEUE_DRAIN_FILE)
      else:
        raise errors.ParameterError(field)
      values.append(entry)
    return values
2701 a8083063 Iustin Pop
2702 a8083063 Iustin Pop
2703 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once we know the instance's nodes
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)
    # older clients may not pass ignore_size; default it to False
    if not hasattr(self.op, "ignore_size"):
      self.op.ignore_size = False

  def Exec(self, feedback_fn):
    """Activate the disks.

    @return: the per-disk (node, iv_name, payload) mapping produced by
        L{_AssembleInstanceDisks}
    @raise errors.OpExecError: if any block device failed to assemble

    """
    disks_ok, disks_info = \
              _AssembleInstanceDisks(self, self.instance,
                                     ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
2743 a8083063 Iustin Pop
2744 a8083063 Iustin Pop
2745 e3443b36 Iustin Pop
def _AssembleInstanceDisks(lu, instance, ignore_secondaries=False,
                           ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        # work on a copy so the cached config object keeps its size
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=False, pass=1): %s",
                           inst_disk.iv_name, node, msg)
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
      msg = result.RemoteFailMsg()
      if msg:
        lu.proc.LogWarning("Could not prepare block device %s on node %s"
                           " (is_primary=True, pass=2): %s",
                           inst_disk.iv_name, node, msg)
        disks_ok = False
    # NOTE: 'result' here is the rpc result of the primary-node call
    # from the inner loop above; its payload is recorded even if the
    # assemble reported a failure (disks_ok will be False then)
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        result.payload))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
2823 a8083063 Iustin Pop
2824 a8083063 Iustin Pop
2825 b9bddb6b Iustin Pop
def _StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  Thin wrapper over L{_AssembleInstanceDisks} which, on failure, shuts
  the disks back down and raises instead of returning an error value.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param instance: the instance whose disks we start
  @param force: passed through as ignore_secondaries; when not true a
      hint about '--force' is logged on failure
  @raise errors.OpExecError: if the disks could not be assembled

  """
  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                           ignore_secondaries=force)
  if not disks_ok:
    # roll back whatever was assembled before reporting the error
    _ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.proc.LogWarning("", hint="If the message above refers to a"
                         " secondary node,"
                         " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")
2839 fe7b0351 Michael Hanselmann
2840 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks are computed later, once we know the instance's nodes
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks

    Delegates to L{_SafeShutdownInstanceDisks}, which refuses to act
    on a running instance.

    """
    instance = self.instance
    _SafeShutdownInstanceDisks(self, instance)
2872 a8083063 Iustin Pop
2873 a8083063 Iustin Pop
2874 b9bddb6b Iustin Pop
def _SafeShutdownInstanceDisks(lu, instance):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  _ShutdownInstanceDisks.

  @raise errors.OpExecError: if the primary node cannot be contacted,
      or if the instance is still running on it

  """
  ins_l = lu.rpc.call_instance_list([instance.primary_node],
                                      [instance.hypervisor])
  ins_l = ins_l[instance.primary_node]
  if ins_l.failed or not isinstance(ins_l.data, list):
    raise errors.OpExecError("Can't contact node '%s'" %
                             instance.primary_node)

  if instance.name in ins_l.data:
    raise errors.OpExecError("Instance is running, can't shutdown"
                             " block devices.")

  _ShutdownInstanceDisks(lu, instance)
2893 a8083063 Iustin Pop
2894 a8083063 Iustin Pop
2895 b9bddb6b Iustin Pop
def _ShutdownInstanceDisks(lu, instance, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored
  (i.e. they do not make the function report failure); errors on
  secondary nodes always count.

  @return: True if all shutdowns succeeded (modulo ignored primary
      errors), False otherwise

  """
  all_result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.RemoteFailMsg()
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        # a failure counts unless it was on the primary node and the
        # caller asked us to ignore primary-node errors
        if not ignore_primary or node != instance.primary_node:
          all_result = False
  return all_result
2916 a8083063 Iustin Pop
2917 a8083063 Iustin Pop
2918 9ca87a96 Iustin Pop
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
  nodeinfo[node].Raise()
  free_mem = nodeinfo[node].data.get('memory_free')
  # a non-int answer means the node couldn't report its free memory
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                             " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                             " needed %s MiB, available %s MiB" %
                             (node, reason, requested, free_mem))
2950 d4f16fd9 Iustin Pop
2951 d4f16fd9 Iustin Pop
2952 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, validates any
    temporary beparams/hvparams overrides passed with the opcode, and
    verifies the primary node has enough free memory (unless the
    instance is already running there).

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # extra beparams
    self.beparams = getattr(self.op, "beparams", {})
    if self.beparams:
      if not isinstance(self.beparams, dict):
        raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
                                   " dict" % (type(self.beparams), ))
      # fill the beparams dict
      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
      self.op.beparams = self.beparams

    # extra hvparams
    self.hvparams = getattr(self.op, "hvparams", {})
    if self.hvparams:
      if not isinstance(self.hvparams, dict):
        raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
                                   " dict" % (type(self.hvparams), ))

      # check hypervisor parameter syntax (locally)
      cluster = self.cfg.GetClusterInfo()
      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
      # validate the merged view: cluster defaults, instance params,
      # then the one-off overrides from this opcode
      filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor],
                                    instance.hvparams)
      filled_hvp.update(self.hvparams)
      hv_type = hypervisor.GetHypervisor(instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
      self.op.hvparams = self.hvparams

    _CheckNodeOnline(self, instance.primary_node)

    bep = self.cfg.GetClusterInfo().FillBE(instance)
    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    # only check free memory if the instance isn't already running
    if not remote_info.data:
      _CheckNodeFreeMemory(self, instance.primary_node,
                           "starting instance %s" % instance.name,
                           bep[constants.BE_MEMORY], instance.hypervisor)

  def Exec(self, feedback_fn):
    """Start the instance.

    @raise errors.OpExecError: if the instance could not be started;
        in that case the disks are shut back down first

    """
    instance = self.instance
    force = self.op.force

    self.cfg.MarkInstanceUp(instance.name)

    node_current = instance.primary_node

    _StartInstanceDisks(self, instance, force)

    result = self.rpc.call_instance_start(node_current, instance,
                                          self.hvparams, self.beparams)
    msg = result.RemoteFailMsg()
    if msg:
      _ShutdownInstanceDisks(self, instance)
      raise errors.OpExecError("Could not start instance: %s" % msg)
3049 a8083063 Iustin Pop
3050 a8083063 Iustin Pop
3051 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
  REQ_BGL = False

  def ExpandNames(self):
    # validate the reboot type before taking any locks
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    _CheckNodeOnline(self, instance.primary_node)

    # check bridges existence
    _CheckInstanceBridgesExist(self, instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft/hard reboots are delegated to the hypervisor on the primary
    node; a full reboot is implemented as shutdown + disk restart +
    start.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      for disk in instance.disks:
        self.cfg.SetDiskID(disk, node_current)
      result = self.rpc.call_instance_reboot(node_current, instance,
                                             reboot_type)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not reboot instance: %s" % msg)
    else:
      # full reboot: stop the instance completely and start it again
      result = self.rpc.call_instance_shutdown(node_current, instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not shutdown instance for"
                                 " full reboot: %s" % msg)
      _ShutdownInstanceDisks(self, instance)
      _StartInstanceDisks(self, instance, ignore_secondaries)
      result = self.rpc.call_instance_start(node_current, instance, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(instance.name)
3134 bf6929a2 Alexander Schreiber
3135 bf6929a2 Alexander Schreiber
3136 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    A failed hypervisor-level shutdown is only logged as a warning;
    the disks are shut down regardless.

    """
    instance = self.instance
    node_current = instance.primary_node
    # mark the instance down in the config before touching the node
    self.cfg.MarkInstanceDown(instance.name)
    result = self.rpc.call_instance_shutdown(node_current, instance)
    msg = result.RemoteFailMsg()
    if msg:
      self.proc.LogWarning("Could not shutdown instance: %s" % msg)

    _ShutdownInstanceDisks(self, instance)
3182 a8083063 Iustin Pop
3183 a8083063 Iustin Pop
3184 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  This re-runs the OS creation scripts on the instance's disks; the
  instance must be down.  An optional ``os_type`` opcode field switches
  the instance to a different OS first.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    _CheckNodeOnline(self, instance.primary_node)

    # a diskless instance has nothing to reinstall onto
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node that the instance is really not running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # FIX: the original formatted self.op.pnode here, which is not a
        # field of the reinstall opcode and would raise AttributeError
        # instead of the intended error message
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      result = self.rpc.call_os_get(pnode.name, self.op.os_type)
      result.Raise()
      if not isinstance(result.data, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.Update(inst)

    # the disks must be active for the OS scripts to run; always shut
    # them down again afterwards, even on failure
    _StartInstanceDisks(self, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      result = self.rpc.call_instance_os_add(inst.primary_node, inst)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s: %s" %
                                 (inst.name, inst.primary_node, msg))
    finally:
      _ShutdownInstanceDisks(self, inst)
3270 fe7b0351 Michael Hanselmann
3271 fe7b0351 Michael Hanselmann
3272 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name is valid and not already taken.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    _CheckNodeOnline(self, instance.primary_node)

    if instance.admin_up:
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node that the instance is really not running
    remote_info = self.rpc.call_instance_info(instance.primary_node,
                                              instance.name,
                                              instance.hypervisor)
    remote_info.Raise()
    if remote_info.data:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    # FIX: the docstring above said "Reinstall the instance." — a
    # copy-paste leftover from LUReinstallInstance
    inst = self.instance
    old_name = inst.name

    if inst.disk_template == constants.DT_FILE:
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL
    self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if inst.disk_template == constants.DT_FILE:
      # file-based disks live under a directory named after the instance,
      # so the storage directory must be renamed on the primary node too
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Could not connect to node '%s' to rename"
                                 " directory '%s' to '%s' (but the instance"
                                 " has been renamed in Ganeti)" % (
                                 inst.primary_node, old_file_storage_dir,
                                 new_file_storage_dir))

      if not result.data[0]:
        raise errors.OpExecError("Could not rename directory '%s' to '%s'"
                                 " (but the instance has been renamed in"
                                 " Ganeti)" % (old_file_storage_dir,
                                               new_file_storage_dir))

    # run the OS-level rename script with the disks active; always shut
    # them down again afterwards
    _StartInstanceDisks(self, inst, None)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name)
      msg = result.RemoteFailMsg()
      if msg:
        # only a warning: the config rename already happened
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.proc.LogWarning(msg)
    finally:
      _ShutdownInstanceDisks(self, inst)
3381 decd5f45 Iustin Pop
3382 decd5f45 Iustin Pop
3383 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_failures"]
  REQ_BGL = False

  def ExpandNames(self):
    # node locks are filled in later via DeclareLocks/_LockInstancesNodes
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    hook_env = _BuildInstanceHookEnvByObject(self, self.instance)
    # only the master is in the node list for the removal hooks
    node_list = [self.cfg.GetMasterNode()]
    return hook_env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    inst = self.instance
    logging.info("Shutting down instance %s on node %s",
                 inst.name, inst.primary_node)

    shutdown_res = self.rpc.call_instance_shutdown(inst.primary_node, inst)
    shutdown_msg = shutdown_res.RemoteFailMsg()
    if shutdown_msg:
      # with ignore_failures we degrade every error to a warning and
      # press on with the removal
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (inst.name, inst.primary_node, shutdown_msg))
      feedback_fn("Warning: can't shutdown instance: %s" % shutdown_msg)

    logging.info("Removing block devices for instance %s", inst.name)

    if not _RemoveDisks(self, inst):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logging.info("Removing instance %s out of cluster config", inst.name)

    self.cfg.RemoveInstance(inst.name)
    self.remove_locks[locking.LEVEL_INSTANCE] = inst.name
3451 a8083063 Iustin Pop
3452 a8083063 Iustin Pop
3453 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names", "use_locking"]
  REQ_BGL = False
  # static fields can be served from the configuration alone; the regex
  # entries describe parametrized fields such as disk.size/0 or nic.macs
  _FIELDS_STATIC = utils.FieldSet(*["name", "os", "pnode", "snodes",
                                    "admin_state",
                                    "disk_template", "ip", "mac", "bridge",
                                    "sda_size", "sdb_size", "vcpus", "tags",
                                    "network_port", "beparams",
                                    r"(disk)\.(size)/([0-9]+)",
                                    r"(disk)\.(sizes)", "disk_usage",
                                    r"(nic)\.(mac|ip|bridge)/([0-9]+)",
                                    r"(nic)\.(macs|ips|bridges)",
                                    r"(disk|nic)\.(count)",
                                    "serial_no", "hypervisor", "hvparams",] +
                                  ["hv/%s" % name
                                   for name in constants.HVS_PARAMETERS] +
                                  ["be/%s" % name
                                   for name in constants.BES_PARAMETERS])
  # dynamic fields require live data from the nodes via RPC
  _FIELDS_DYNAMIC = utils.FieldSet("oper_state", "oper_ram", "status")

  def ExpandNames(self):
    _CheckOutputFields(static=self._FIELDS_STATIC,
                       dynamic=self._FIELDS_DYNAMIC,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_INSTANCE] = 1
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedInstances(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    # locking is only needed when live (node) data was requested AND the
    # caller asked for it; purely static queries run lock-free
    self.do_node_query = self._FIELDS_STATIC.NonMatching(self.op.output_fields)
    self.do_locking = self.do_node_query and self.op.use_locking
    if self.do_locking:
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE and self.do_locking:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllInstancesInfo()
    if self.wanted == locking.ALL_SET:
      # caller didn't specify instance names, so ordering is not important
      if self.do_locking:
        instance_names = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        instance_names = all_info.keys()
      instance_names = utils.NiceSort(instance_names)
    else:
      # caller did specify names, so we must keep the ordering
      if self.do_locking:
        tgt_set = self.acquired_locks[locking.LEVEL_INSTANCE]
      else:
        tgt_set = all_info.keys()
      missing = set(self.wanted).difference(tgt_set)
      if missing:
        raise errors.OpExecError("Some instances were removed before"
                                 " retrieving their data: %s" % missing)
      instance_names = self.wanted

    instance_list = [all_info[iname] for iname in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])
    hv_list = list(set([inst.hypervisor for inst in instance_list]))

    bad_nodes = []
    off_nodes = []
    if self.do_node_query:
      # query the primary nodes for live instance data; nodes that are
      # offline or whose RPC failed are tracked separately so the per-field
      # code below can report ERROR_* statuses
      live_data = {}
      node_data = self.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          bad_nodes.append(name)
          off_nodes.append(name)
        if result.failed:
          bad_nodes.append(name)
        else:
          if result.data:
            live_data.update(result.data)
            # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    HVPREFIX = "hv/"
    BEPREFIX = "be/"
    output = []
    for instance in instance_list:
      iout = []
      # hypervisor/backend parameters with cluster defaults filled in
      i_hv = self.cfg.GetClusterInfo().FillHV(instance)
      i_be = self.cfg.GetClusterInfo().FillBE(instance)
      for field in self.op.output_fields:
        st_match = self._FIELDS_STATIC.Matches(field)
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = instance.admin_up
        elif field == "oper_state":
          # None means "unknown" (node unreachable)
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin + operational status
          if instance.primary_node in off_nodes:
            val = "ERROR_nodeoffline"
          elif instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.admin_up:
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.admin_up:
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "vcpus":
          val = i_be[constants.BE_VCPUS]
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          # legacy single-NIC fields report the first NIC only
          if instance.nics:
            val = instance.nics[0].ip
          else:
            val = None
        elif field == "bridge":
          if instance.nics:
            val = instance.nics[0].bridge
          else:
            val = None
        elif field == "mac":
          if instance.nics:
            val = instance.nics[0].mac
          else:
            val = None
        elif field == "sda_size" or field == "sdb_size":
          # legacy names for the sizes of the first two disks
          idx = ord(field[2]) - ord('a')
          try:
            val = instance.FindDisk(idx).size
          except errors.OpPrereqError:
            val = None
        elif field == "disk_usage": # total disk usage per node
          disk_sizes = [{'size': disk.size} for disk in instance.disks]
          val = _ComputeDiskSize(instance.disk_template, disk_sizes)
        elif field == "tags":
          val = list(instance.GetTags())
        elif field == "serial_no":
          val = instance.serial_no
        elif field == "network_port":
          val = instance.network_port
        elif field == "hypervisor":
          val = instance.hypervisor
        elif field == "hvparams":
          val = i_hv
        elif (field.startswith(HVPREFIX) and
              field[len(HVPREFIX):] in constants.HVS_PARAMETERS):
          val = i_hv.get(field[len(HVPREFIX):], None)
        elif field == "beparams":
          val = i_be
        elif (field.startswith(BEPREFIX) and
              field[len(BEPREFIX):] in constants.BES_PARAMETERS):
          val = i_be.get(field[len(BEPREFIX):], None)
        elif st_match and st_match.groups():
          # matches a variable list
          st_groups = st_match.groups()
          if st_groups and st_groups[0] == "disk":
            if st_groups[1] == "count":
              val = len(instance.disks)
            elif st_groups[1] == "sizes":
              val = [disk.size for disk in instance.disks]
            elif st_groups[1] == "size":
              try:
                val = instance.FindDisk(st_groups[2]).size
              except errors.OpPrereqError:
                val = None
            else:
              assert False, "Unhandled disk parameter"
          elif st_groups[0] == "nic":
            if st_groups[1] == "count":
              val = len(instance.nics)
            elif st_groups[1] == "macs":
              val = [nic.mac for nic in instance.nics]
            elif st_groups[1] == "ips":
              val = [nic.ip for nic in instance.nics]
            elif st_groups[1] == "bridges":
              val = [nic.bridge for nic in instance.nics]
            else:
              # index-based item
              nic_idx = int(st_groups[2])
              if nic_idx >= len(instance.nics):
                val = None
              else:
                if st_groups[1] == "mac":
                  val = instance.nics[nic_idx].mac
                elif st_groups[1] == "ip":
                  val = instance.nics[nic_idx].ip
                elif st_groups[1] == "bridge":
                  val = instance.nics[nic_idx].bridge
                else:
                  assert False, "Unhandled NIC parameter"
          else:
            assert False, ("Declared but unhandled variable parameter '%s'" %
                           field)
        else:
          assert False, "Declared but unhandled parameter '%s'" % field
        iout.append(val)
      output.append(iout)

    return output
3701 a8083063 Iustin Pop
3702 a8083063 Iustin Pop
3703 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  The failover is done by shutting the instance down on its primary
  node and starting it on its (single) secondary node; CheckPrereq
  restricts this to network-mirrored disk templates.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  # parameters that must be present in the opcode
  _OP_REQP = ["instance_name", "ignore_consistency"]
  # locking is declared per-level below, no big ganeti lock needed
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and declare the needed locks.

    The node locks cannot be computed yet (the instance's nodes are
    only known once the instance lock is held), so the node level is
    declared empty here and recalculated (LOCKS_REPLACE) in
    DeclareLocks.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node-level locks for the instance's nodes.

    """
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    # add the standard per-instance variables on top of our own
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a
    network-mirrored disk template, and that the target (secondary)
    node is online, not drained, has the needed bridges and - if the
    instance is marked up - enough free memory.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    # the instance was locked in ExpandNames, so it must still exist
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # backend parameters with cluster-level defaults filled in
    bep = self.cfg.GetClusterInfo().FillBE(instance)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      # mirrored templates always have a secondary node; reaching this
      # point is an internal error, not a user error
      raise errors.ProgrammerError("no secondary node but using "
                                   "a mirrored disk template")

    target_node = secondary_nodes[0]
    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)

    if instance.admin_up:
      # check memory requirements on the secondary node
      _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    # Raise() aborts on RPC-level failure; result.data then holds the
    # boolean answer from the node
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for drbd, these are drbd over lvm
      if not _CheckDiskConsistency(self, dev, target_node, False):
        # a degraded target disk is only acceptable when the instance
        # is down or the user explicitly asked to ignore consistency
        if instance.admin_up and not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 instance.name, source_node)

    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      if self.op.ignore_consistency:
        # best-effort mode: warn but continue with the failover
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # record the new primary before attempting the start, so the
    # configuration reflects the failover even if the start below fails
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      feedback_fn("* activating the instance's disks on target node")
      logging.info("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                               ignore_secondaries=True)
      if not disks_ok:
        # don't leave half-assembled disks behind on failure
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      feedback_fn("* starting the instance on the target node")
      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))
3840 a8083063 Iustin Pop
3841 a8083063 Iustin Pop
3842 53c776b5 Iustin Pop
class LUMigrateInstance(LogicalUnit):
  """Migrate an instance.

  This is migration without shutting down, compared to the failover,
  which is done with shutdown.

  Only DRBD8-based instances are supported; the migration temporarily
  switches the disks to dual-master mode, moves the instance, then
  switches back to single-master.

  """
  HPATH = "instance-migrate"
  HTYPE = constants.HTYPE_INSTANCE
  # parameters that must be present in the opcode
  _OP_REQP = ["instance_name", "live", "cleanup"]

  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and declare the needed locks.

    Node locks are recalculated (LOCKS_REPLACE) in DeclareLocks once
    the instance lock is held and its nodes are known.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    """Acquire the node-level locks for the instance's nodes.

    """
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self, self.instance)
    env["MIGRATE_LIVE"] = self.op.live
    env["MIGRATE_CLEANUP"] = self.op.cleanup
    nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses the drbd8
    disk template, that the target node has enough memory and the
    needed bridges, and (unless cleaning up) that the target node is
    not drained and the hypervisor reports the instance as migratable.

    """
    # NOTE(review): unlike LUFailoverInstance.CheckPrereq, this
    # re-expands the name instead of relying on the already-expanded
    # self.op.instance_name from _ExpandAndLockInstance
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " drbd8, cannot migrate.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      # drbd8 instances always have a secondary; this is an internal
      # inconsistency, not a user error
      raise errors.ConfigurationError("No secondary node but using"
                                      " drbd8 disk template")

    # backend parameters with cluster-level defaults filled in
    i_be = self.cfg.GetClusterInfo().FillBE(instance)

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self, target_node, "migrating instance %s" %
                         instance.name, i_be[constants.BE_MEMORY],
                         instance.hypervisor)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    result = self.rpc.call_bridges_exist(target_node, brlist)
    # result.failed is the RPC-level status, result.data the node's
    # boolean answer
    if result.failed or not result.data:
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    if not self.op.cleanup:
      # for a real migration the target must be usable and the
      # hypervisor must agree the instance can be migrated
      _CheckNodeNotDrained(self, target_node)
      result = self.rpc.call_instance_migratable(instance.primary_node,
                                                 instance)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
                                   msg)

    self.instance = instance

  def _WaitUntilSync(self):
    """Poll with custom rpc for disk sync.

    This uses our own step-based rpc call.

    Loops (sleeping 2 seconds between polls) until all nodes report
    the DRBD resync as done, showing the minimum progress percentage.

    """
    self.feedback_fn("* wait until resync is done")
    all_done = False
    while not all_done:
      all_done = True
      result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                            self.nodes_ip,
                                            self.instance.disks)
      min_percent = 100
      for node, nres in result.items():
        msg = nres.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
                                   (node, msg))
        # payload is a (done, sync_percent) pair per node
        node_done, node_percent = nres.payload
        all_done = all_done and node_done
        if node_percent is not None:
          min_percent = min(min_percent, node_percent)
      if not all_done:
        if min_percent < 100:
          self.feedback_fn("   - progress: %.1f%%" % min_percent)
        time.sleep(2)

  def _EnsureSecondary(self, node):
    """Demote a node to secondary.

    Closes the instance's block devices on the given node via the
    blockdev_close rpc.

    """
    self.feedback_fn("* switching node %s to secondary mode" % node)

    for dev in self.instance.disks:
      self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_close(node, self.instance.name,
                                          self.instance.disks)
    msg = result.RemoteFailMsg()
    if msg:
      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
                               " error %s" % (node, msg))

  def _GoStandalone(self):
    """Disconnect from the network.

    Switches the DRBD devices on all involved nodes to standalone
    (disconnected) mode.

    """
    self.feedback_fn("* changing into standalone mode")
    result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                               self.instance.disks)
    for node, nres in result.items():
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot disconnect disks node %s,"
                                 " error %s" % (node, msg))

  def _GoReconnect(self, multimaster):
    """Reconnect to the network.

    @param multimaster: if True, reattach the DRBD devices in
        dual-master mode (needed during the actual migration),
        otherwise in single-master mode

    """
    if multimaster:
      msg = "dual-master"
    else:
      msg = "single-master"
    self.feedback_fn("* changing disks into %s mode" % msg)
    result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
                                           self.instance.disks,
                                           self.instance.name, multimaster)
    for node, nres in result.items():
      # note: 'msg' is reused here for the per-node failure message
      msg = nres.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Cannot change disks config on node %s,"
                                 " error: %s" % (node, msg))

  def _ExecCleanup(self):
    """Try to cleanup after a failed migration.

    The cleanup is done by:
      - check that the instance is running only on one node
        (and update the config if needed)
      - change disks on its secondary node to secondary
      - wait until disks are fully synchronized
      - disconnect from the network
      - change disks into single-master mode
      - wait again until disks are fully synchronized

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    # check running on only one node
    self.feedback_fn("* checking where the instance actually runs"
                     " (if this hangs, the hypervisor might be in"
                     " a bad state)")
    ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
    for node, result in ins_l.items():
      result.Raise()
      if not isinstance(result.data, list):
        raise errors.OpExecError("Can't contact node '%s'" % node)

    runningon_source = instance.name in ins_l[source_node].data
    runningon_target = instance.name in ins_l[target_node].data

    if runningon_source and runningon_target:
      # split-brain at the hypervisor level: refuse to guess
      raise errors.OpExecError("Instance seems to be running on two nodes,"
                               " or the hypervisor is confused. You will have"
                               " to ensure manually that it runs only on one"
                               " and restart this operation.")

    if not (runningon_source or runningon_target):
      raise errors.OpExecError("Instance does not seem to be running at all."
                               " In this case, it's safer to repair by"
                               " running 'gnt-instance stop' to ensure disk"
                               " shutdown, and then restarting it.")

    if runningon_target:
      # the migration has actually succeeded, we need to update the config
      self.feedback_fn("* instance running on secondary node (%s),"
                       " updating config" % target_node)
      instance.primary_node = target_node
      self.cfg.Update(instance)
      demoted_node = source_node
    else:
      self.feedback_fn("* instance confirmed to be running on its"
                       " primary node (%s)" % source_node)
      demoted_node = target_node

    self._EnsureSecondary(demoted_node)
    try:
      self._WaitUntilSync()
    except errors.OpExecError:
      # we ignore here errors, since if the device is standalone, it
      # won't be able to sync
      pass
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def _RevertDiskStatus(self):
    """Try to revert the disk status after a failed migration.

    Best-effort: failures are only logged, since this already runs on
    an error path.

    """
    target_node = self.target_node
    try:
      self._EnsureSecondary(target_node)
      self._GoStandalone()
      self._GoReconnect(False)
      self._WaitUntilSync()
    except errors.OpExecError, err:
      self.LogWarning("Migration failed and I can't reconnect the"
                      " drives: error '%s'\n"
                      "Please look and recover the instance status" %
                      str(err))

  def _AbortMigration(self):
    """Call the hypervisor code to abort a started migration.

    Uses the finalize_migration rpc with success=False; errors are
    only logged so that the disk revert can still be attempted.

    """
    instance = self.instance
    target_node = self.target_node
    migration_info = self.migration_info

    abort_result = self.rpc.call_finalize_migration(target_node,
                                                    instance,
                                                    migration_info,
                                                    False)
    abort_msg = abort_result.RemoteFailMsg()
    if abort_msg:
      logging.error("Aborting migration failed on target node %s: %s" %
                    (target_node, abort_msg))
      # Don't raise an exception here, as we still have to try to revert the
      # disk status, even if this step failed.

  def _ExecMigration(self):
    """Migrate an instance.

    The migrate is done by:
      - change the disks into dual-master mode
      - wait until disks are fully synchronized again
      - migrate the instance
      - change disks on the new secondary node (the old primary) to secondary
      - wait until disks are fully synchronized
      - change disks into single-master mode

    """
    instance = self.instance
    target_node = self.target_node
    source_node = self.source_node

    self.feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # migration requires fully-synced disks, unlike failover
      if not _CheckDiskConsistency(self, dev, target_node, False):
        raise errors.OpExecError("Disk %s is degraded or not fully"
                                 " synchronized on target node,"
                                 " aborting migrate." % dev.iv_name)

    # First get the migration information from the remote node
    result = self.rpc.call_migration_info(source_node, instance)
    msg = result.RemoteFailMsg()
    if msg:
      log_err = ("Failed fetching source migration information from %s: %s" %
                 (source_node, msg))
      logging.error(log_err)
      raise errors.OpExecError(log_err)

    self.migration_info = migration_info = result.payload

    # Then switch the disks to master/master mode
    self._EnsureSecondary(target_node)
    self._GoStandalone()
    self._GoReconnect(True)
    self._WaitUntilSync()

    self.feedback_fn("* preparing %s to accept the instance" % target_node)
    result = self.rpc.call_accept_instance(target_node,
                                           instance,
                                           migration_info,
                                           self.nodes_ip[target_node])

    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance pre-migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
                               (instance.name, msg))

    self.feedback_fn("* migrating instance to %s" % target_node)
    # grace period around the actual migration; presumably to let the
    # hypervisor/DRBD state settle -- TODO confirm rationale
    time.sleep(10)
    result = self.rpc.call_instance_migrate(source_node, instance,
                                            self.nodes_ip[target_node],
                                            self.op.live)
    msg = result.RemoteFailMsg()
    if msg:
      logging.error("Instance migration failed, trying to revert"
                    " disk status: %s", msg)
      self._AbortMigration()
      self._RevertDiskStatus()
      raise errors.OpExecError("Could not migrate instance %s: %s" %
                               (instance.name, msg))
    time.sleep(10)

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.Update(instance)

    result = self.rpc.call_finalize_migration(target_node,
                                              instance,
                                              migration_info,
                                              True)
    msg = result.RemoteFailMsg()
    if msg:
      # the instance already runs on the target; the disks are not
      # reverted in this case
      logging.error("Instance migration succeeded, but finalization failed:"
                    " %s" % msg)
      raise errors.OpExecError("Could not finalize instance migration: %s" %
                               msg)

    # demote the old primary and go back to single-master mode
    self._EnsureSecondary(source_node)
    self._WaitUntilSync()
    self._GoStandalone()
    self._GoReconnect(False)
    self._WaitUntilSync()

    self.feedback_fn("* done")

  def Exec(self, feedback_fn):
    """Perform the migration.

    Computes the node/IP environment used by the helper methods, then
    dispatches to cleanup or actual migration based on the opcode.

    """
    self.feedback_fn = feedback_fn

    self.source_node = self.instance.primary_node
    self.target_node = self.instance.secondary_nodes[0]
    self.all_nodes = [self.source_node, self.target_node]
    # the DRBD rpcs use the secondary (replication) IP addresses
    self.nodes_ip = {
      self.source_node: self.cfg.GetNodeInfo(self.source_node).secondary_ip,
      self.target_node: self.cfg.GetNodeInfo(self.target_node).secondary_ip,
      }
    if self.op.cleanup:
      return self._ExecCleanup()
    else:
      return self._ExecMigration()
4211 53c776b5 Iustin Pop
4212 53c776b5 Iustin Pop
4213 428958aa Iustin Pop
def _CreateBlockDev(lu, node, instance, device, force_create,
                    info, force_open):
  """Create a tree of block devices on a given node.

  Children are always created before their parent device.  If this
  device type has to be created on secondaries, it and all its children
  are created; otherwise we only recurse to the children, keeping the
  same 'force' value.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be change to True whenever we find a device which has
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution

  """
  force_create = force_create or device.CreateOnSecondary()

  for child in (device.children or []):
    _CreateBlockDev(lu, node, instance, child, force_create,
                    info, force_open)

  if force_create:
    _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
4253 de12473a Iustin Pop
4254 de12473a Iustin Pop
4255 de12473a Iustin Pop
def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as a LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device own Open() execution
  @raise errors.OpExecError: if the remote creation fails

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info)
  fail_msg = result.RemoteFailMsg()
  if fail_msg:
    raise errors.OpExecError("Can't create block device %s on node %s for"
                             " instance %s: %s" %
                             (device, node, instance.name, fail_msg))
  if device.physical_id is None:
    # remember the physical id as computed/returned by the backend
    device.physical_id = result.payload
4286 a8083063 Iustin Pop
4287 a8083063 Iustin Pop
4288 b9bddb6b Iustin Pop
def _GenerateUniqueNames(lu, exts):
4289 923b1523 Iustin Pop
  """Generate a suitable LV name.
4290 923b1523 Iustin Pop

4291 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
4292 923b1523 Iustin Pop

4293 923b1523 Iustin Pop
  """
4294 923b1523 Iustin Pop
  results = []
4295 923b1523 Iustin Pop
  for val in exts:
4296 b9bddb6b Iustin Pop
    new_id = lu.cfg.GenerateUniqueID()
4297 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
4298 923b1523 Iustin Pop
  return results
4299 923b1523 Iustin Pop
4300 923b1523 Iustin Pop
4301 b9bddb6b Iustin Pop
def _GenerateDRBD8Branch(lu, primary, secondary, size, names, iv_name,
                         p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  The children are two LVs: one of the requested size for the data and
  a 128MB one for the DRBD metadata.

  """
  # all three cfg calls allocate resources in the config, so keep their
  # relative order stable
  port = lu.cfg.AllocatePort()
  vgname = lu.cfg.GetVGName()
  shared_secret = lu.cfg.GenerateDRBDSecret()

  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port,
                                  p_minor, s_minor, shared_secret),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
4320 a1f445d3 Iustin Pop
4321 7c0d6283 Michael Hanselmann
4322 b9bddb6b Iustin Pop
def _GenerateDiskTemplate(lu, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_info,
                          file_storage_dir, file_driver,
                          base_index):
  """Generate the entire disk layout for a given template type.

  @param lu: the logical unit on whose behalf we execute
  @param template_name: one of the constants.DT_* disk templates
  @param instance_name: the name of the instance owning the disks
  @param primary_node: the primary node of the instance
  @param secondary_nodes: list of secondary nodes; must be empty for all
      templates except DRBD8, which requires exactly one
  @param disk_info: list of dicts with "size" and "mode" keys, one per
      disk to generate
  @param file_storage_dir: base directory for file-based disks
  @param file_driver: driver for file-based disks
  @param base_index: offset added to the per-disk index when building
      LV names and iv_names
  @return: list of L{objects.Disk} objects (empty for DT_DISKLESS)
  @raise errors.ProgrammerError: if the secondary node count does not
      match the template, or the template name is unknown

  """
  #TODO: compute space requirements

  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []
  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                      for i in range(disk_count)])
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_LV, size=disk["size"],
                              logical_id=(vgname, names[idx]),
                              iv_name="disk/%d" % disk_index,
                              mode=disk["mode"])
      disks.append(disk_dev)
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # two minors per disk: one on the primary node, one on the secondary
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    # two LV names per disk: the data volume and the metadata volume
    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      # names/minors were generated pairwise above, hence the idx*2 slices
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk["size"], names[idx*2:idx*2+2],
                                      "disk/%d" % disk_index,
                                      minors[idx*2], minors[idx*2+1])
      disk_dev.mode = disk["mode"]
      disks.append(disk_dev)
  elif template_name == constants.DT_FILE:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      disk_dev = objects.Disk(dev_type=constants.LD_FILE, size=disk["size"],
                              iv_name="disk/%d" % disk_index,
                              logical_id=(file_driver,
                                          "%s/disk%d" % (file_storage_dir,
                                                         disk_index)),
                              mode=disk["mode"])
      disks.append(disk_dev)
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
4386 a8083063 Iustin Pop
4387 a8083063 Iustin Pop
4388 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
4389 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
4390 3ecf6786 Iustin Pop

4391 3ecf6786 Iustin Pop
  """
4392 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
4393 a0c3fea1 Michael Hanselmann
4394 a0c3fea1 Michael Hanselmann
4395 b9bddb6b Iustin Pop
def _CreateDisks(lu, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @raise errors.OpExecError: if the file storage directory cannot be
      created on the primary node

  """
  info_text = _GetInstanceInfoText(instance)
  primary = instance.primary_node

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(primary, file_storage_dir)

    if result.failed or not result.data:
      raise errors.OpExecError("Could not connect to node '%s'" % primary)

    if not result.data[0]:
      raise errors.OpExecError("Failed to create directory '%s'" %
                               file_storage_dir)

  # Note: this needs to be kept in sync with adding of disks in
  # LUSetInstanceParams
  for device in instance.disks:
    logging.info("Creating volume %s for instance %s",
                 device.iv_name, instance.name)
    #HARDCODE
    for node in instance.all_nodes:
      # devices are forced/opened only on the primary node
      on_primary = (node == primary)
      _CreateBlockDev(lu, node, instance, device, on_primary,
                      info_text, on_primary)
4431 a8083063 Iustin Pop
4432 a8083063 Iustin Pop
4433 b9bddb6b Iustin Pop
def _RemoveDisks(lu, instance):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @rtype: boolean
  @return: True if every removal succeeded, False otherwise

  """
  logging.info("Removing block devices for instance %s", instance.name)

  success = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(disk, node)
      fail_msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
      if fail_msg:
        # best-effort: log and carry on with the remaining devices
        lu.LogWarning("Could not remove block device %s on node %s,"
                      " continuing anyway: %s", device.iv_name, node, fail_msg)
        success = False

  if instance.disk_template == constants.DT_FILE:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                 file_storage_dir)
    if result.failed or not result.data:
      logging.error("Could not remove directory '%s'", file_storage_dir)
      success = False

  return success
4470 a8083063 Iustin Pop
4471 a8083063 Iustin Pop
4472 08db7c5c Iustin Pop
def _ComputeDiskSize(disk_template, disks):
  """Compute disk size requirements in the volume group

  @param disk_template: the disk template of the instance
  @param disks: list of disk definitions (dicts with a "size" key)
  @return: the required size in the volume group, or None for
      templates which do not use it
  @raise errors.ProgrammerError: for unknown disk templates

  """
  data_size = sum(d["size"] for d in disks)
  # required free disk space as a function of the disk template;
  # 128 MB are added per disk for the DRBD metadata volume
  req_size_dict = {
    constants.DT_DISKLESS: None,
    constants.DT_PLAIN: data_size,
    constants.DT_DRBD8: data_size + 128 * len(disks),
    constants.DT_FILE: None,
  }

  try:
    return req_size_dict[disk_template]
  except KeyError:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)
4491 e2fe6369 Iustin Pop
4492 74409b12 Iustin Pop
def _CheckHVParams(lu, nodenames, hvname, hvparams):
4493 74409b12 Iustin Pop
  """Hypervisor parameter validation.
4494 74409b12 Iustin Pop

4495 74409b12 Iustin Pop
  This function abstract the hypervisor parameter validation to be
4496 74409b12 Iustin Pop
  used in both instance create and instance modify.
4497 74409b12 Iustin Pop

4498 74409b12 Iustin Pop
  @type lu: L{LogicalUnit}
4499 74409b12 Iustin Pop
  @param lu: the logical unit for which we check
4500 74409b12 Iustin Pop
  @type nodenames: list
4501 74409b12 Iustin Pop
  @param nodenames: the list of nodes on which we should check
4502 74409b12 Iustin Pop
  @type hvname: string
4503 74409b12 Iustin Pop
  @param hvname: the name of the hypervisor we should use
4504 74409b12 Iustin Pop
  @type hvparams: dict
4505 74409b12 Iustin Pop
  @param hvparams: the parameters which we need to check
4506 74409b12 Iustin Pop
  @raise errors.OpPrereqError: if the parameters are not valid
4507 74409b12 Iustin Pop

4508 74409b12 Iustin Pop
  """
4509 74409b12 Iustin Pop
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
4510 74409b12 Iustin Pop
                                                  hvname,
4511 74409b12 Iustin Pop
                                                  hvparams)
4512 74409b12 Iustin Pop
  for node in nodenames:
4513 781de953 Iustin Pop
    info = hvinfo[node]
4514 68c6f21c Iustin Pop
    if info.offline:
4515 68c6f21c Iustin Pop
      continue
4516 0959c824 Iustin Pop
    msg = info.RemoteFailMsg()
4517 0959c824 Iustin Pop
    if msg:
4518 d64769a8 Iustin Pop
      raise errors.OpPrereqError("Hypervisor parameter validation"
4519 d64769a8 Iustin Pop
                                 " failed on node %s: %s" % (node, msg))
4520 74409b12 Iustin Pop
4521 74409b12 Iustin Pop
4522 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
4523 a8083063 Iustin Pop
  """Create an instance.
4524 a8083063 Iustin Pop

4525 a8083063 Iustin Pop
  """
4526 a8083063 Iustin Pop
  HPATH = "instance-add"
4527 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4528 08db7c5c Iustin Pop
  _OP_REQP = ["instance_name", "disks", "disk_template",
4529 08db7c5c Iustin Pop
              "mode", "start",
4530 08db7c5c Iustin Pop
              "wait_for_sync", "ip_check", "nics",
4531 338e51e8 Iustin Pop
              "hvparams", "beparams"]
4532 7baf741d Guido Trotter
  REQ_BGL = False
4533 7baf741d Guido Trotter
4534 7baf741d Guido Trotter
  def _ExpandNode(self, node):
    """Expands and checks one node name.

    @param node: the (possibly short) node name to expand
    @return: the full node name
    @raise errors.OpPrereqError: if the name does not resolve to a
        known node

    """
    expanded = self.cfg.ExpandNodeName(node)
    if expanded is None:
      raise errors.OpPrereqError("Unknown node %s" % node)
    return expanded
4542 7baf741d Guido Trotter
4543 7baf741d Guido Trotter
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    Besides computing C{self.needed_locks}/C{self.add_locks}, this also
    performs the cheap, local-only validation of the opcode (mode, disk
    template, hypervisor/backend parameter syntax, NIC and disk
    definitions) and stores the normalized results on C{self} for use
    by CheckPrereq/Exec.

    @raise errors.OpPrereqError: for any locally-detectable invalid
        opcode parameter

    """
    self.needed_locks = {}

    # set optional parameters to none if they don't exist
    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # cheap checks, mostly valid constants given

    # verify creation mode
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    # default to the cluster-wide hypervisor when none was given
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" % (self.op.hypervisor,
                                  ",".join(enabled_hvs)))

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.FillDict(cluster.hvparams[self.op.hypervisor],
                                  self.op.hvparams)
    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp

    # fill and remember the beparams dict
    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
    self.be_full = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
                                    self.op.beparams)

    #### instance parameters check

    # instance name verification
    hostname1 = utils.HostInfo(self.op.instance_name)
    self.op.instance_name = instance_name = hostname1.name

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    # NIC buildup
    self.nics = []
    for nic in self.op.nics:
      # ip validity checks; "none"/None mean no IP, "auto" means the
      # instance's own resolved address
      ip = nic.get("ip", None)
      if ip is None or ip.lower() == "none":
        nic_ip = None
      elif ip.lower() == constants.VALUE_AUTO:
        nic_ip = hostname1.ip
      else:
        if not utils.IsValidIP(ip):
          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
                                     " like a valid IP" % ip)
        nic_ip = ip

      # MAC address verification
      mac = nic.get("mac", constants.VALUE_AUTO)
      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        if not utils.IsValidMac(mac.lower()):
          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                                     mac)
        else:
          # or validate/reserve the current one
          if self.cfg.IsMacInUse(mac):
            raise errors.OpPrereqError("MAC address %s already in use"
                                       " in cluster" % mac)

      # bridge verification
      bridge = nic.get("bridge", None)
      if bridge is None:
        bridge = self.cfg.GetDefBridge()
      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, bridge=bridge))

    # disk checks/pre-build
    self.disks = []
    for disk in self.op.disks:
      mode = disk.get("mode", constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                   mode)
      size = disk.get("size", None)
      if size is None:
        raise errors.OpPrereqError("Missing disk size")
      try:
        size = int(size)
      except ValueError:
        raise errors.OpPrereqError("Invalid disk size '%s'" % size)
      self.disks.append({"size": size, "mode": mode})

    # used in CheckPrereq for ip ping check
    self.check_ip = hostname1.ip

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver)

    if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir):
      raise errors.OpPrereqError("File storage directory path not absolute")

    ### Node/iallocator related checks
    if [self.op.iallocator, self.op.pnode].count(None) != 1:
      raise errors.OpPrereqError("One and only one of iallocator and primary"
                                 " node must be given")

    if self.op.iallocator:
      # the allocator may choose any node, so lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.op.pnode = self._ExpandNode(self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = self._ExpandNode(self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # unknown source node: the export has to be searched for, so
        # all nodes must be locked
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from an absolute"
                                     " path requires a source node option.")
      else:
        self.op.src_node = src_node = self._ExpandNode(src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          # relative paths are anchored at the cluster export directory
          self.op.src_path = src_path = \
            os.path.join(constants.EXPORT_DIR, src_path)

    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")
4706 a8083063 Iustin Pop
4707 538475ca Iustin Pop
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success this sets C{self.op.pnode} (and C{self.op.snode} when
    two nodes are required) from the allocator's answer.

    @raise errors.OpPrereqError: if the allocator fails or returns an
        unexpected number of nodes

    """
    nic_dicts = [nic.ToDict() for nic in self.nics]
    allocator = IAllocator(self,
                           mode=constants.IALLOCATOR_MODE_ALLOC,
                           name=self.op.instance_name,
                           disk_template=self.op.disk_template,
                           tags=[],
                           os=self.op.os_type,
                           vcpus=self.be_full[constants.BE_VCPUS],
                           mem_size=self.be_full[constants.BE_MEMORY],
                           disks=self.disks,
                           nics=nic_dicts,
                           hypervisor=self.op.hypervisor,
                           )

    allocator.Run(self.op.iallocator)

    if not allocator.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, allocator.info))
    if len(allocator.nodes) != allocator.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator, len(allocator.nodes),
                                  allocator.required_nodes))

    self.op.pnode = allocator.nodes[0]
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 ", ".join(allocator.nodes))
    if allocator.required_nodes == 2:
      self.op.snode = allocator.nodes[1]
4742 538475ca Iustin Pop
4743 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"ADD_MODE": self.op.mode}
    if self.op.mode == constants.INSTANCE_IMPORT:
      # import mode carries the source export location along to the hooks
      env.update({
        "SRC_NODE": self.op.src_node,
        "SRC_PATH": self.op.src_path,
        "SRC_IMAGES": self.src_images,
        })

    env.update(_BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      memory=self.be_full[constants.BE_MEMORY],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=[(nic.ip, nic.bridge, nic.mac) for nic in self.nics],
      disk_template=self.op.disk_template,
      disks=[(disk["size"], disk["mode"]) for disk in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
    ))

    # hooks run on the master plus all nodes of the new instance
    node_list = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return env, node_list, node_list
4776 a8083063 Iustin Pop
4777 a8083063 Iustin Pop
4778 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    Validates everything needed to create the instance: cluster LVM
    support, (in import mode) the source export, IP conflicts, primary
    and secondary node state, free disk space, hypervisor parameters,
    OS availability and bridges.  As side effects this fills in
    self.pnode, self.secondaries and (for imports) self.src_images,
    and may run the iallocator to choose the nodes.

    """
    # lvm-backed disk templates need a cluster-wide volume group
    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances")

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_node is None:
        # relative path without a source node: search all locked nodes
        # for an export matching the given name
        exp_list = self.rpc.call_export_list(
          self.acquired_locks[locking.LEVEL_NODE])
        found = False
        for node in exp_list:
          if not exp_list[node].failed and src_path in exp_list[node].data:
            found = True
            self.op.src_node = src_node = node
            self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
                                                       src_path)
            break
        if not found:
          raise errors.OpPrereqError("No export found for relative path %s" %
                                      src_path)

      _CheckNodeOnline(self, src_node)
      result = self.rpc.call_export_info(src_node, src_path)
      result.Raise()
      if not result.data:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      # export_info is a ConfigParser-style object describing the export
      export_info = result.data
      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # Check that the new instance doesn't have less disks than the export
      instance_disks = len(self.disks)
      export_disks = export_info.getint(constants.INISECT_INS, 'disk_count')
      if instance_disks < export_disks:
        raise errors.OpPrereqError("Not enough disks to import."
                                   " (instance: %d, export: %d)" %
                                   (instance_disks, export_disks))

      # the OS type is forced to the one recorded in the export
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      disk_images = []
      for idx in range(export_disks):
        option = 'disk%d_dump' % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = os.path.join(src_path, export_name)
          disk_images.append(image)
        else:
          # False marks "no dump available" for this disk index
          disk_images.append(False)

      self.src_images = disk_images

      old_name = export_info.get(constants.INISECT_INS, 'name')
      # FIXME: int() here could throw a ValueError on broken exports
      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
      # when re-importing under the same name, reuse the exported MACs
      # for NICs left on auto
      if self.op.instance_name == old_name:
        for idx, nic in enumerate(self.nics):
          # NOTE(review): "exp_nic_count >= idx" also admits
          # idx == exp_nic_count, one past the last exported NIC, which
          # would make the get() below fail on a missing option; this
          # looks like it should be "> idx" -- confirm before changing
          if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
            nic_mac_ini = 'nic%d_mac' % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a reachable IP means another host already uses this address
      if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name))

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC()

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name)

    self.secondaries = []

    # mirror node verification
    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if self.op.snode is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      _CheckNodeOnline(self, self.op.snode)
      _CheckNodeNotDrained(self, self.op.snode)
      self.secondaries.append(self.op.snode)

    nodenames = [pnode.name] + self.secondaries

    # req_size is None for templates with no LVM space requirement
    req_size = _ComputeDiskSize(self.op.disk_template,
                                self.disks)

    # Check lv size requirements
    if req_size is not None:
      nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
                                         self.op.hypervisor)
      for node in nodenames:
        info = nodeinfo[node]
        info.Raise()
        info = info.data
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          # missing or non-numeric answer from the node
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    # os verification
    result = self.rpc.call_os_get(pnode.name, self.op.os_type)
    result.Raise()
    if not isinstance(result.data, objects.OS) or not result.data:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # bridge check on primary node
    bridges = [n.bridge for n in self.nics]
    result = self.rpc.call_bridges_exist(self.pnode.name, bridges)
    result.Raise()
    if not result.data:
      raise errors.OpPrereqError("One of the target bridges '%s' does not"
                                 " exist on destination node '%s'" %
                                 (",".join(bridges), pnode.name))

    # memory check on primary node
    if self.op.start:
      _CheckNodeFreeMemory(self, self.pnode.name,
                           "creating instance %s" % self.op.instance_name,
                           self.be_full[constants.BE_MEMORY],
                           self.op.hypervisor)
4957 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the disks, registers the instance in the configuration,
    releases the node locks no longer needed, waits for disk sync,
    runs the OS create/import scripts and optionally starts the
    instance.  On disk failure the partial creation is rolled back.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # some hypervisors need a network (e.g. VNC) port reserved up front
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    ##if self.op.vnc_bind_address is None:
    ##  self.op.vnc_bind_address = constants.VNC_DEFAULT_BIND_ADDRESS

    # this is needed because os.path.join does not accept None arguments
    if self.op.file_storage_dir is None:
      string_file_storage_dir = ""
    else:
      string_file_storage_dir = self.op.file_storage_dir

    # build the full file storage dir path
    file_storage_dir = os.path.normpath(os.path.join(
                                        self.cfg.GetFileStorageDir(),
                                        string_file_storage_dir, instance))


    disks = _GenerateDiskTemplate(self,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries,
                                  self.disks,
                                  file_storage_dir,
                                  self.op.file_driver,
                                  0)

    # the instance is created stopped (admin_up=False); it is flipped to
    # running further down only if self.op.start is set
    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            admin_up=False,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            )

    feedback_fn("* creating instance disks...")
    try:
      _CreateDisks(self, iobj)
    except errors.OpExecError:
      # roll back any partially-created disks, then re-raise the
      # original error; the DRBD minors are released in all cases
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, iobj)
      finally:
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)
    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]
    # Unlock all the nodes
    if self.op.mode == constants.INSTANCE_IMPORT:
      # keep the source node locked: it is still needed below for the
      # OS import scripts
      nodes_keep = [self.op.src_node]
      nodes_release = [node for node in self.acquired_locks[locking.LEVEL_NODE]
                       if node != self.op.src_node]
      self.context.glm.release(locking.LEVEL_NODE, nodes_release)
      self.acquired_locks[locking.LEVEL_NODE] = nodes_keep
    else:
      self.context.glm.release(locking.LEVEL_NODE)
      del self.acquired_locks[locking.LEVEL_NODE]

    if self.op.wait_for_sync:
      # block until the disks are fully synced
      disk_abort = not _WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo the creation completely
      _RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        result = self.rpc.call_instance_os_add(pnode_name, iobj)
        msg = result.RemoteFailMsg()
        if msg:
          raise errors.OpExecError("Could not add os for instance %s"
                                   " on node %s: %s" %
                                   (instance, pnode_name, msg))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_images = self.src_images
        cluster_name = self.cfg.GetClusterName()
        import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                         src_node, src_images,
                                                         cluster_name)
        import_result.Raise()
        # per-disk import failures only warn; the instance is kept
        for idx, result in enumerate(import_result.data):
          if not result:
            self.LogWarning("Could not import the image %s for instance"
                            " %s, disk %d, on node %s" %
                            (src_images[idx], instance, idx, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      iobj.admin_up = True
      self.cfg.Update(iobj)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("Could not start instance: %s" % msg)
5092 a8083063 Iustin Pop
5093 a8083063 Iustin Pop
5094 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    # only the instance lock is needed
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance
    _CheckNodeOnline(self, instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    primary = instance.primary_node

    # ask the primary node which instances it currently runs
    running = self.rpc.call_instance_list([primary],
                                          [instance.hypervisor])[primary]
    running.Raise()

    if instance.name not in running.data:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logging.debug("Connecting to console of %s on %s", instance.name, primary)

    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    cluster = self.cfg.GetClusterInfo()
    # beparams and hvparams are passed separately, to avoid editing the
    # instance and then saving the defaults in the instance itself.
    hvparams = cluster.FillHV(instance)
    beparams = cluster.FillBE(instance)
    console_cmd = hyper.GetShellCommandForConsole(instance, hvparams,
                                                  beparams)

    # build ssh cmdline
    return self.ssh.BuildCmd(primary, "root", console_cmd, batch=True,
                             tty=True)
5145 a8083063 Iustin Pop
5146 a8083063 Iustin Pop
5147 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode fields: "mode" selects which side of the DRBD pair
  # is replaced, "disks" lists the disk indices to operate on
  _OP_REQP = ["instance_name", "mode", "disks"]
  # no big graneti lock; node locks are computed in ExpandNames/DeclareLocks
  REQ_BGL = False
5155 efd990e4 Guido Trotter
5156 7e9366f7 Iustin Pop
  def CheckArguments(self):
    """Normalize optional opcode fields and validate their combination.

    Ensures remote_node/iallocator exist (defaulting to None) and that
    exactly the right subset of them is given for the requested mode.

    """
    # make sure both optional attributes exist on the opcode
    for attr in ("remote_node", "iallocator"):
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    # check for valid parameter combination
    unset = [self.op.remote_node, self.op.iallocator].count(None)
    if self.op.mode == constants.REPLACE_DISK_CHG:
      # changing the secondary needs exactly one of the two options
      if unset == 2:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given")
      if unset == 0:
        raise errors.OpPrereqError("Give either the iallocator or the new"
                                   " secondary, not both")
    else: # not replacing the secondary
      # neither option is meaningful here
      if unset != 2:
        raise errors.OpPrereqError("The iallocator and new node options can"
                                   " be used only when changing the"
                                   " secondary node")
5177 7e9366f7 Iustin Pop
5178 7e9366f7 Iustin Pop
  def ExpandNames(self):
    """Compute the instance and node locks needed for the replacement.

    """
    self._ExpandAndLockInstance()

    if self.op.iallocator is not None:
      # the allocator may pick any node, so we must lock them all
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      full_name = self.cfg.ExpandNodeName(self.op.remote_node)
      if full_name is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.op.remote_node = full_name
      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [full_name]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      # lock just the instance's own nodes, filled in by DeclareLocks
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5198 efd990e4 Guido Trotter
5199 efd990e4 Guido Trotter
  def DeclareLocks(self, level):
    """Add the instance's own nodes at the node locking level.

    """
    if level != locking.LEVEL_NODE:
      return
    # If we're not already locking all nodes in the set we have to
    # declare the instance's primary/secondary nodes.
    if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
      self._LockInstancesNodes()
5205 a8083063 Iustin Pop
5206 b6e82a65 Iustin Pop
  def _RunAllocator(self):
    """Compute a new secondary node using an IAllocator.

    On success the chosen node is stored in self.op.remote_node.

    """
    allocator = IAllocator(self,
                           mode=constants.IALLOCATOR_MODE_RELOC,
                           name=self.op.instance_name,
                           relocate_from=[self.sec_node])

    allocator.Run(self.op.iallocator)

    if not allocator.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" % (self.op.iallocator,
                                                           allocator.info))
    node_count = len(allocator.nodes)
    if node_count != allocator.required_nodes:
      raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                 " of nodes (%s), required %s" %
                                 (self.op.iallocator,
                                  node_count, allocator.required_nodes))
    self.op.remote_node = allocator.nodes[0]
    self.LogInfo("Selected new secondary for the instance: %s",
                 self.op.remote_node)
5229 b6e82a65 Iustin Pop
5230 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    # merge in the generic per-instance variables (these win on conflict)
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    node_list = [self.cfg.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return env, node_list, node_list
5249 a8083063 Iustin Pop
5250 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, is DRBD8-based,
    and computes the target/other (and possibly new) nodes for the
    requested replacement mode.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.instance = instance

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances")

    num_secondaries = len(instance.secondary_nodes)
    if num_secondaries != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 num_secondaries)

    self.sec_node = instance.secondary_nodes[0]

    if self.op.iallocator is not None:
      # lets the allocator choose and store the new secondary
      self._RunAllocator()

    remote_node = self.op.remote_node
    if remote_node is None:
      self.remote_node_info = None
    else:
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    if remote_node == self.sec_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance.")

    # compute the node roles for the chosen mode; n1/n2 are the nodes
    # whose online state we must verify below
    if self.op.mode == constants.REPLACE_DISK_PRI:
      self.tgt_node = n1 = instance.primary_node
      self.oth_node = n2 = self.sec_node
    elif self.op.mode == constants.REPLACE_DISK_SEC:
      self.tgt_node = n1 = self.sec_node
      self.oth_node = n2 = instance.primary_node
    elif self.op.mode == constants.REPLACE_DISK_CHG:
      self.new_node = n1 = remote_node
      self.oth_node = n2 = instance.primary_node
      self.tgt_node = self.sec_node
      _CheckNodeNotDrained(self, remote_node)
    else:
      raise errors.ProgrammerError("Unhandled disk replace mode")

    _CheckNodeOnline(self, n1)
    _CheckNodeOnline(self, n2)

    # an empty disk list means "replace all disks"
    if not self.op.disks:
      self.op.disks = range(len(instance.disks))

    for disk_idx in self.op.disks:
      # FindDisk raises for invalid indices
      instance.FindDisk(disk_idx)
5311 a8083063 Iustin Pop
5312 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    # feedback_fn is not used directly; progress is reported via self.proc
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # iv_names maps iv_name -> (drbd disk, old LVs, new LVs) for steps 4-6
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = self.rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results[node]
      if res.failed or not res.data or my_vg not in res.data:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking disk/%d on %s" % (idx, node))
        cfg.SetDiskID(dev, node)
        result = self.rpc.call_blockdev_find(node, dev)
        msg = result.RemoteFailMsg()
        # an empty payload on success means the device does not exist
        if not msg and not result.payload:
          msg = "disk not found"
        if msg:
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      info("checking disk/%d consistency on %s" % (idx, oth_node))
      if not _CheckDiskConsistency(self, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for idx, dev in enumerate(instance.disks):
      if idx not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".disk%d_%s" % (idx, suf)
                  for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(self, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # fixed-size DRBD metadata volume (128, same unit as dev.size)
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDev(self, tgt_node, instance, new_lv, True,
                        _GetInstanceInfoText(instance), False)

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      # (vg, lv) -> (vg, lv_replaced-<suffix>)
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(tgt_node, to_ren)
        if not result.RemoteFailMsg() and result.payload:
          # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(tgt_node, rlist)
      result.Raise()
      if not result.data:
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # record the name swap in the config objects as well
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
      if result.failed or not result.data:
        # best-effort rollback: try to remove the just-created LVs
        for new_lv in new_lvs:
          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
          if msg:
            warning("Can't rollback device %s: %s", dev, msg,
                    hint="cleanup manually the unused logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(self, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      result = self.rpc.call_blockdev_find(instance.primary_node, dev)
      msg = result.RemoteFailMsg()
      if not msg and not result.payload:
        msg = "disk not found"
      if msg:
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))
      # NOTE(review): payload[5] appears to be the is_degraded flag of the
      # blockdev status tuple (cf. the error below) — confirm against rpc
      if result.payload[5]:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # removal failures are only warned about, not fatal
        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
        if msg:
          warning("Can't remove old LV: %s" % msg,
                  hint="manually remove unused LVs")
          continue
5503 a9e0c397 Iustin Pop
5504 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
5505 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
5506 a9e0c397 Iustin Pop

5507 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
5508 a9e0c397 Iustin Pop
      - for all disks of the instance:
5509 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
5510 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
5511 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
5512 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
5513 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
5514 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
5515 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
5516 a9e0c397 Iustin Pop
          not network enabled
5517 a9e0c397 Iustin Pop
      - wait for sync across all devices
5518 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
5519 a9e0c397 Iustin Pop

5520 a9e0c397 Iustin Pop
    Failures are not very well handled.
5521 0834c866 Iustin Pop

5522 a9e0c397 Iustin Pop
    """
5523 0834c866 Iustin Pop
    steps_total = 6
5524 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
5525 a9e0c397 Iustin Pop
    instance = self.instance
5526 a9e0c397 Iustin Pop
    iv_names = {}
5527 a9e0c397 Iustin Pop
    # start of work
5528 a9e0c397 Iustin Pop
    cfg = self.cfg
5529 a9e0c397 Iustin Pop
    old_node = self.tgt_node
5530 a9e0c397 Iustin Pop
    new_node = self.new_node
5531 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
5532 a2d59d8b Iustin Pop
    nodes_ip = {
5533 a2d59d8b Iustin Pop
      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
5534 a2d59d8b Iustin Pop
      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
5535 a2d59d8b Iustin Pop
      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
5536 a2d59d8b Iustin Pop
      }
5537 0834c866 Iustin Pop
5538 0834c866 Iustin Pop
    # Step: check device activation
5539 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
5540 0834c866 Iustin Pop
    info("checking volume groups")
5541 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
5542 72737a7f Iustin Pop
    results = self.rpc.call_vg_list([pri_node, new_node])
5543 0834c866 Iustin Pop
    for node in pri_node, new_node:
5544 781de953 Iustin Pop
      res = results[node]
5545 781de953 Iustin Pop
      if res.failed or not res.data or my_vg not in res.data:
5546 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
5547 0834c866 Iustin Pop
                                 (my_vg, node))
5548 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5549 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5550 0834c866 Iustin Pop
        continue
5551 d418ebfb Iustin Pop
      info("checking disk/%d on %s" % (idx, pri_node))
5552 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5553 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5554 23829f6f Iustin Pop
      msg = result.RemoteFailMsg()
5555 23829f6f Iustin Pop
      if not msg and not result.payload:
5556 23829f6f Iustin Pop
        msg = "disk not found"
5557 23829f6f Iustin Pop
      if msg:
5558 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
5559 23829f6f Iustin Pop
                                 (idx, pri_node, msg))
5560 0834c866 Iustin Pop
5561 0834c866 Iustin Pop
    # Step: check other node consistency
5562 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
5563 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5564 d418ebfb Iustin Pop
      if idx not in self.op.disks:
5565 0834c866 Iustin Pop
        continue
5566 d418ebfb Iustin Pop
      info("checking disk/%d consistency on %s" % (idx, pri_node))
5567 b9bddb6b Iustin Pop
      if not _CheckDiskConsistency(self, dev, pri_node, True, ldisk=True):
5568 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
5569 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
5570 0834c866 Iustin Pop
                                 pri_node)
5571 0834c866 Iustin Pop
5572 0834c866 Iustin Pop
    # Step: create new storage
5573 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
5574 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5575 d418ebfb Iustin Pop
      info("adding new local storage on %s for disk/%d" %
5576 d418ebfb Iustin Pop
           (new_node, idx))
5577 428958aa Iustin Pop
      # we pass force_create=True to force LVM creation
5578 a9e0c397 Iustin Pop
      for new_lv in dev.children:
5579 428958aa Iustin Pop
        _CreateBlockDev(self, new_node, instance, new_lv, True,
5580 428958aa Iustin Pop
                        _GetInstanceInfoText(instance), False)
5581 a9e0c397 Iustin Pop
5582 468b46f9 Iustin Pop
    # Step 4: dbrd minors and drbd setups changes
5583 a1578d63 Iustin Pop
    # after this, we must manually remove the drbd minors on both the
5584 a1578d63 Iustin Pop
    # error and the success paths
5585 a1578d63 Iustin Pop
    minors = cfg.AllocateDRBDMinor([new_node for dev in instance.disks],
5586 a1578d63 Iustin Pop
                                   instance.name)
5587 468b46f9 Iustin Pop
    logging.debug("Allocated minors %s" % (minors,))
5588 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
5589 d418ebfb Iustin Pop
    for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
5590 d418ebfb Iustin Pop
      info("activating a new drbd on %s for disk/%d" % (new_node, idx))
5591 a2d59d8b Iustin Pop
      # create new devices on new_node; note that we create two IDs:
5592 a2d59d8b Iustin Pop
      # one without port, so the drbd will be activated without
5593 a2d59d8b Iustin Pop
      # networking information on the new node at this stage, and one
5594 a2d59d8b Iustin Pop
      # with network, for the latter activation in step 4
5595 a2d59d8b Iustin Pop
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
5596 a2d59d8b Iustin Pop
      if pri_node == o_node1:
5597 a2d59d8b Iustin Pop
        p_minor = o_minor1
5598 ffa1c0dc Iustin Pop
      else:
5599 a2d59d8b Iustin Pop
        p_minor = o_minor2
5600 a2d59d8b Iustin Pop
5601 a2d59d8b Iustin Pop
      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
5602 a2d59d8b Iustin Pop
      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
5603 a2d59d8b Iustin Pop
5604 a2d59d8b Iustin Pop
      iv_names[idx] = (dev, dev.children, new_net_id)
5605 a1578d63 Iustin Pop
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
5606 a2d59d8b Iustin Pop
                    new_net_id)
5607 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
5608 a2d59d8b Iustin Pop
                              logical_id=new_alone_id,
5609 8a6c7011 Iustin Pop
                              children=dev.children,
5610 8a6c7011 Iustin Pop
                              size=dev.size)
5611 796cab27 Iustin Pop
      try:
5612 de12473a Iustin Pop
        _CreateSingleBlockDev(self, new_node, instance, new_drbd,
5613 de12473a Iustin Pop
                              _GetInstanceInfoText(instance), False)
5614 82759cb1 Iustin Pop
      except errors.GenericError:
5615 a1578d63 Iustin Pop
        self.cfg.ReleaseDRBDMinors(instance.name)
5616 796cab27 Iustin Pop
        raise
5617 a9e0c397 Iustin Pop
5618 d418ebfb Iustin Pop
    for idx, dev in enumerate(instance.disks):
5619 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
5620 d418ebfb Iustin Pop
      info("shutting down drbd for disk/%d on old node" % idx)
5621 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
5622 cacfd1fd Iustin Pop
      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
5623 cacfd1fd Iustin Pop
      if msg:
5624 cacfd1fd Iustin Pop
        warning("Failed to shutdown drbd for disk/%d on old node: %s" %
5625 cacfd1fd Iustin Pop
                (idx, msg),
5626 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
5627 a9e0c397 Iustin Pop
5628 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
5629 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
5630 a2d59d8b Iustin Pop
                                               instance.disks)[pri_node]
5631 642445d9 Iustin Pop
5632 a2d59d8b Iustin Pop
    msg = result.RemoteFailMsg()
5633 a2d59d8b Iustin Pop
    if msg:
5634 a2d59d8b Iustin Pop
      # detaches didn't succeed (unlikely)
5635 a1578d63 Iustin Pop
      self.cfg.ReleaseDRBDMinors(instance.name)
5636 a2d59d8b Iustin Pop
      raise errors.OpExecError("Can't detach the disks from the network on"
5637 a2d59d8b Iustin Pop
                               " old node: %s" % (msg,))
5638 642445d9 Iustin Pop
5639 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
5640 642445d9 Iustin Pop
    # the instance to point to the new secondary
5641 642445d9 Iustin Pop
    info("updating instance configuration")
5642 468b46f9 Iustin Pop
    for dev, _, new_logical_id in iv_names.itervalues():
5643 468b46f9 Iustin Pop
      dev.logical_id = new_logical_id
5644 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5645 642445d9 Iustin Pop
    cfg.Update(instance)
5646 a9e0c397 Iustin Pop
5647 642445d9 Iustin Pop
    # and now perform the drbd attach
5648 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
5649 a2d59d8b Iustin Pop
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
5650 a2d59d8b Iustin Pop
                                           instance.disks, instance.name,
5651 a2d59d8b Iustin Pop
                                           False)
5652 a2d59d8b Iustin Pop
    for to_node, to_result in result.items():
5653 a2d59d8b Iustin Pop
      msg = to_result.RemoteFailMsg()
5654 a2d59d8b Iustin Pop
      if msg:
5655 a2d59d8b Iustin Pop
        warning("can't attach drbd disks on node %s: %s", to_node, msg,
5656 a2d59d8b Iustin Pop
                hint="please do a gnt-instance info to see the"
5657 a2d59d8b Iustin Pop
                " status of disks")
5658 a9e0c397 Iustin Pop
5659 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
5660 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
5661 a9e0c397 Iustin Pop
    # return value
5662 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
5663 b9bddb6b Iustin Pop
    _WaitForSync(self, instance, unlock=True)
5664 a9e0c397 Iustin Pop
5665 a9e0c397 Iustin Pop
    # so check manually all the devices
5666 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5667 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
5668 781de953 Iustin Pop
      result = self.rpc.call_blockdev_find(pri_node, dev)
5669 23829f6f Iustin Pop
      msg = result.RemoteFailMsg()
5670 23829f6f Iustin Pop
      if not msg and not result.payload:
5671 23829f6f Iustin Pop
        msg = "disk not found"
5672 23829f6f Iustin Pop
      if msg:
5673 23829f6f Iustin Pop
        raise errors.OpExecError("Can't find DRBD device disk/%d: %s" %
5674 23829f6f Iustin Pop
                                 (idx, msg))
5675 23829f6f Iustin Pop
      if result.payload[5]:
5676 d418ebfb Iustin Pop
        raise errors.OpExecError("DRBD device disk/%d is degraded!" % idx)
5677 a9e0c397 Iustin Pop
5678 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
5679 d418ebfb Iustin Pop
    for idx, (dev, old_lvs, _) in iv_names.iteritems():
5680 d418ebfb Iustin Pop
      info("remove logical volumes for disk/%d" % idx)
5681 a9e0c397 Iustin Pop
      for lv in old_lvs:
5682 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
5683 e1bc0878 Iustin Pop
        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
5684 e1bc0878 Iustin Pop
        if msg:
5685 e1bc0878 Iustin Pop
          warning("Can't remove LV on old secondary: %s", msg,
5686 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
5687 a9e0c397 Iustin Pop
5688 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
5689 a9e0c397 Iustin Pop
    """Execute disk replacement.
5690 a9e0c397 Iustin Pop

5691 a9e0c397 Iustin Pop
    This dispatches the disk replacement to the appropriate handler.
5692 a9e0c397 Iustin Pop

5693 a9e0c397 Iustin Pop
    """
5694 a9e0c397 Iustin Pop
    instance = self.instance
5695 22985314 Guido Trotter
5696 22985314 Guido Trotter
    # Activate the instance disks if we're replacing them on a down instance
5697 0d68c45d Iustin Pop
    if not instance.admin_up:
5698 b9bddb6b Iustin Pop
      _StartInstanceDisks(self, instance, True)
5699 22985314 Guido Trotter
5700 7e9366f7 Iustin Pop
    if self.op.mode == constants.REPLACE_DISK_CHG:
5701 7e9366f7 Iustin Pop
      fn = self._ExecD8Secondary
5702 a9e0c397 Iustin Pop
    else:
5703 7e9366f7 Iustin Pop
      fn = self._ExecD8DiskOnly
5704 22985314 Guido Trotter
5705 22985314 Guido Trotter
    ret = fn(feedback_fn)
5706 22985314 Guido Trotter
5707 22985314 Guido Trotter
    # Deactivate the instance disks if we're replacing them on a down instance
5708 0d68c45d Iustin Pop
    if not instance.admin_up:
5709 b9bddb6b Iustin Pop
      _SafeShutdownInstanceDisks(self, instance)
5710 22985314 Guido Trotter
5711 22985314 Guido Trotter
    return ret
5712 a9e0c397 Iustin Pop
5713 a8083063 Iustin Pop
5714 8729e0d7 Iustin Pop
class LUGrowDisk(LogicalUnit):
5715 8729e0d7 Iustin Pop
  """Grow a disk of an instance.
5716 8729e0d7 Iustin Pop

5717 8729e0d7 Iustin Pop
  """
5718 8729e0d7 Iustin Pop
  HPATH = "disk-grow"
5719 8729e0d7 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5720 6605411d Iustin Pop
  _OP_REQP = ["instance_name", "disk", "amount", "wait_for_sync"]
5721 31e63dbf Guido Trotter
  REQ_BGL = False
5722 31e63dbf Guido Trotter
5723 31e63dbf Guido Trotter
  def ExpandNames(self):
5724 31e63dbf Guido Trotter
    self._ExpandAndLockInstance()
5725 31e63dbf Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
5726 f6d9a522 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5727 31e63dbf Guido Trotter
5728 31e63dbf Guido Trotter
  def DeclareLocks(self, level):
5729 31e63dbf Guido Trotter
    if level == locking.LEVEL_NODE:
5730 31e63dbf Guido Trotter
      self._LockInstancesNodes()
5731 8729e0d7 Iustin Pop
5732 8729e0d7 Iustin Pop
  def BuildHooksEnv(self):
5733 8729e0d7 Iustin Pop
    """Build hooks env.
5734 8729e0d7 Iustin Pop

5735 8729e0d7 Iustin Pop
    This runs on the master, the primary and all the secondaries.
5736 8729e0d7 Iustin Pop

5737 8729e0d7 Iustin Pop
    """
5738 8729e0d7 Iustin Pop
    env = {
5739 8729e0d7 Iustin Pop
      "DISK": self.op.disk,
5740 8729e0d7 Iustin Pop
      "AMOUNT": self.op.amount,
5741 8729e0d7 Iustin Pop
      }
5742 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
5743 8729e0d7 Iustin Pop
    nl = [
5744 d6a02168 Michael Hanselmann
      self.cfg.GetMasterNode(),
5745 8729e0d7 Iustin Pop
      self.instance.primary_node,
5746 8729e0d7 Iustin Pop
      ]
5747 8729e0d7 Iustin Pop
    return env, nl, nl
5748 8729e0d7 Iustin Pop
5749 8729e0d7 Iustin Pop
  def CheckPrereq(self):
5750 8729e0d7 Iustin Pop
    """Check prerequisites.
5751 8729e0d7 Iustin Pop

5752 8729e0d7 Iustin Pop
    This checks that the instance is in the cluster.
5753 8729e0d7 Iustin Pop

5754 8729e0d7 Iustin Pop
    """
5755 31e63dbf Guido Trotter
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
5756 31e63dbf Guido Trotter
    assert instance is not None, \
5757 31e63dbf Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
5758 6b12959c Iustin Pop
    nodenames = list(instance.all_nodes)
5759 6b12959c Iustin Pop
    for node in nodenames:
5760 7527a8a4 Iustin Pop
      _CheckNodeOnline(self, node)
5761 7527a8a4 Iustin Pop
5762 31e63dbf Guido Trotter
5763 8729e0d7 Iustin Pop
    self.instance = instance
5764 8729e0d7 Iustin Pop
5765 8729e0d7 Iustin Pop
    if instance.disk_template not in (constants.DT_PLAIN, constants.DT_DRBD8):
5766 8729e0d7 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout does not support"
5767 8729e0d7 Iustin Pop
                                 " growing.")
5768 8729e0d7 Iustin Pop
5769 ad24e046 Iustin Pop
    self.disk = instance.FindDisk(self.op.disk)
5770 8729e0d7 Iustin Pop
5771 72737a7f Iustin Pop
    nodeinfo = self.rpc.call_node_info(nodenames, self.cfg.GetVGName(),
5772 72737a7f Iustin Pop
                                       instance.hypervisor)
5773 8729e0d7 Iustin Pop
    for node in nodenames:
5774 781de953 Iustin Pop
      info = nodeinfo[node]
5775 781de953 Iustin Pop
      if info.failed or not info.data:
5776 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
5777 8729e0d7 Iustin Pop
                                   " from node '%s'" % node)
5778 781de953 Iustin Pop
      vg_free = info.data.get('vg_free', None)
5779 8729e0d7 Iustin Pop
      if not isinstance(vg_free, int):
5780 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Can't compute free disk space on"
5781 8729e0d7 Iustin Pop
                                   " node %s" % node)
5782 781de953 Iustin Pop
      if self.op.amount > vg_free:
5783 8729e0d7 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s:"
5784 8729e0d7 Iustin Pop
                                   " %d MiB available, %d MiB required" %
5785 781de953 Iustin Pop
                                   (node, vg_free, self.op.amount))
5786 8729e0d7 Iustin Pop
5787 8729e0d7 Iustin Pop
  def Exec(self, feedback_fn):
5788 8729e0d7 Iustin Pop
    """Execute disk grow.
5789 8729e0d7 Iustin Pop

5790 8729e0d7 Iustin Pop
    """
5791 8729e0d7 Iustin Pop
    instance = self.instance
5792 ad24e046 Iustin Pop
    disk = self.disk
5793 6b12959c Iustin Pop
    for node in instance.all_nodes:
5794 8729e0d7 Iustin Pop
      self.cfg.SetDiskID(disk, node)
5795 72737a7f Iustin Pop
      result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
5796 0959c824 Iustin Pop
      msg = result.RemoteFailMsg()
5797 0959c824 Iustin Pop
      if msg:
5798 781de953 Iustin Pop
        raise errors.OpExecError("Grow request failed to node %s: %s" %
5799 0959c824 Iustin Pop
                                 (node, msg))
5800 8729e0d7 Iustin Pop
    disk.RecordGrow(self.op.amount)
5801 8729e0d7 Iustin Pop
    self.cfg.Update(instance)
5802 6605411d Iustin Pop
    if self.op.wait_for_sync:
5803 cd4d138f Guido Trotter
      disk_abort = not _WaitForSync(self, instance)
5804 6605411d Iustin Pop
      if disk_abort:
5805 86d9d3bb Iustin Pop
        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
5806 86d9d3bb Iustin Pop
                             " status.\nPlease check the instance.")
5807 8729e0d7 Iustin Pop
5808 8729e0d7 Iustin Pop
5809 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
5810 a8083063 Iustin Pop
  """Query runtime instance data.
5811 a8083063 Iustin Pop

5812 a8083063 Iustin Pop
  """
5813 57821cac Iustin Pop
  _OP_REQP = ["instances", "static"]
5814 a987fa48 Guido Trotter
  REQ_BGL = False
5815 ae5849b5 Michael Hanselmann
5816 a987fa48 Guido Trotter
  def ExpandNames(self):
5817 a987fa48 Guido Trotter
    self.needed_locks = {}
5818 a987fa48 Guido Trotter
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))
5819 a987fa48 Guido Trotter
5820 a987fa48 Guido Trotter
    if not isinstance(self.op.instances, list):
5821 a987fa48 Guido Trotter
      raise errors.OpPrereqError("Invalid argument type 'instances'")
5822 a987fa48 Guido Trotter
5823 a987fa48 Guido Trotter
    if self.op.instances:
5824 a987fa48 Guido Trotter
      self.wanted_names = []
5825 a987fa48 Guido Trotter
      for name in self.op.instances:
5826 a987fa48 Guido Trotter
        full_name = self.cfg.ExpandInstanceName(name)
5827 a987fa48 Guido Trotter
        if full_name is None:
5828 f57c76e4 Iustin Pop
          raise errors.OpPrereqError("Instance '%s' not known" % name)
5829 a987fa48 Guido Trotter
        self.wanted_names.append(full_name)
5830 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
5831 a987fa48 Guido Trotter
    else:
5832 a987fa48 Guido Trotter
      self.wanted_names = None
5833 a987fa48 Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
5834 a987fa48 Guido Trotter
5835 a987fa48 Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = []
5836 a987fa48 Guido Trotter
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
5837 a987fa48 Guido Trotter
5838 a987fa48 Guido Trotter
  def DeclareLocks(self, level):
5839 a987fa48 Guido Trotter
    if level == locking.LEVEL_NODE:
5840 a987fa48 Guido Trotter
      self._LockInstancesNodes()
5841 a8083063 Iustin Pop
5842 a8083063 Iustin Pop
  def CheckPrereq(self):
5843 a8083063 Iustin Pop
    """Check prerequisites.
5844 a8083063 Iustin Pop

5845 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
5846 a8083063 Iustin Pop

5847 a8083063 Iustin Pop
    """
5848 a987fa48 Guido Trotter
    if self.wanted_names is None:
5849 a987fa48 Guido Trotter
      self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
5850 a8083063 Iustin Pop
5851 a987fa48 Guido Trotter
    self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
5852 a987fa48 Guido Trotter
                             in self.wanted_names]
5853 a987fa48 Guido Trotter
    return
5854 a8083063 Iustin Pop
5855 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
5856 a8083063 Iustin Pop
    """Compute block device status.
5857 a8083063 Iustin Pop

5858 a8083063 Iustin Pop
    """
5859 57821cac Iustin Pop
    static = self.op.static
5860 57821cac Iustin Pop
    if not static:
5861 57821cac Iustin Pop
      self.cfg.SetDiskID(dev, instance.primary_node)
5862 57821cac Iustin Pop
      dev_pstatus = self.rpc.call_blockdev_find(instance.primary_node, dev)
5863 9854f5d0 Iustin Pop
      if dev_pstatus.offline:
5864 9854f5d0 Iustin Pop
        dev_pstatus = None
5865 9854f5d0 Iustin Pop
      else:
5866 9854f5d0 Iustin Pop
        msg = dev_pstatus.RemoteFailMsg()
5867 9854f5d0 Iustin Pop
        if msg:
5868 9854f5d0 Iustin Pop
          raise errors.OpExecError("Can't compute disk status for %s: %s" %
5869 9854f5d0 Iustin Pop
                                   (instance.name, msg))
5870 9854f5d0 Iustin Pop
        dev_pstatus = dev_pstatus.payload
5871 57821cac Iustin Pop
    else:
5872 57821cac Iustin Pop
      dev_pstatus = None
5873 57821cac Iustin Pop
5874 a1f445d3 Iustin Pop
    if dev.dev_type in constants.LDS_DRBD:
5875 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
5876 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
5877 a8083063 Iustin Pop
        snode = dev.logical_id[1]
5878 a8083063 Iustin Pop
      else:
5879 a8083063 Iustin Pop
        snode = dev.logical_id[0]
5880 a8083063 Iustin Pop
5881 57821cac Iustin Pop
    if snode and not static:
5882 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
5883 72737a7f Iustin Pop
      dev_sstatus = self.rpc.call_blockdev_find(snode, dev)
5884 9854f5d0 Iustin Pop
      if dev_sstatus.offline:
5885 9854f5d0 Iustin Pop
        dev_sstatus = None
5886 9854f5d0 Iustin Pop
      else:
5887 9854f5d0 Iustin Pop
        msg = dev_sstatus.RemoteFailMsg()
5888 9854f5d0 Iustin Pop
        if msg:
5889 9854f5d0 Iustin Pop
          raise errors.OpExecError("Can't compute disk status for %s: %s" %
5890 9854f5d0 Iustin Pop
                                   (instance.name, msg))
5891 9854f5d0 Iustin Pop
        dev_sstatus = dev_sstatus.payload
5892 a8083063 Iustin Pop
    else:
5893 a8083063 Iustin Pop
      dev_sstatus = None
5894 a8083063 Iustin Pop
5895 a8083063 Iustin Pop
    if dev.children:
5896 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
5897 a8083063 Iustin Pop
                      for child in dev.children]
5898 a8083063 Iustin Pop
    else:
5899 a8083063 Iustin Pop
      dev_children = []
5900 a8083063 Iustin Pop
5901 a8083063 Iustin Pop
    data = {
5902 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
5903 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
5904 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
5905 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
5906 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
5907 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
5908 a8083063 Iustin Pop
      "children": dev_children,
5909 b6fdf8b8 Iustin Pop
      "mode": dev.mode,
5910 c98162a7 Iustin Pop
      "size": dev.size,
5911 a8083063 Iustin Pop
      }
5912 a8083063 Iustin Pop
5913 a8083063 Iustin Pop
    return data
5914 a8083063 Iustin Pop
5915 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
5916 a8083063 Iustin Pop
    """Gather and return data"""
5917 a8083063 Iustin Pop
    result = {}
5918 338e51e8 Iustin Pop
5919 338e51e8 Iustin Pop
    cluster = self.cfg.GetClusterInfo()
5920 338e51e8 Iustin Pop
5921 a8083063 Iustin Pop
    for instance in self.wanted_instances:
5922 57821cac Iustin Pop
      if not self.op.static:
5923 57821cac Iustin Pop
        remote_info = self.rpc.call_instance_info(instance.primary_node,
5924 57821cac Iustin Pop
                                                  instance.name,
5925 57821cac Iustin Pop
                                                  instance.hypervisor)
5926 781de953 Iustin Pop
        remote_info.Raise()
5927 781de953 Iustin Pop
        remote_info = remote_info.data
5928 57821cac Iustin Pop
        if remote_info and "state" in remote_info:
5929 57821cac Iustin Pop
          remote_state = "up"
5930 57821cac Iustin Pop
        else:
5931 57821cac Iustin Pop
          remote_state = "down"
5932 a8083063 Iustin Pop
      else:
5933 57821cac Iustin Pop
        remote_state = None
5934 0d68c45d Iustin Pop
      if instance.admin_up:
5935 a8083063 Iustin Pop
        config_state = "up"
5936 0d68c45d Iustin Pop
      else:
5937 0d68c45d Iustin Pop
        config_state = "down"
5938 a8083063 Iustin Pop
5939 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
5940 a8083063 Iustin Pop
               for device in instance.disks]
5941 a8083063 Iustin Pop
5942 a8083063 Iustin Pop
      idict = {
5943 a8083063 Iustin Pop
        "name": instance.name,
5944 a8083063 Iustin Pop
        "config_state": config_state,
5945 a8083063 Iustin Pop
        "run_state": remote_state,
5946 a8083063 Iustin Pop
        "pnode": instance.primary_node,
5947 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
5948 a8083063 Iustin Pop
        "os": instance.os,
5949 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
5950 a8083063 Iustin Pop
        "disks": disks,
5951 e69d05fd Iustin Pop
        "hypervisor": instance.hypervisor,
5952 24838135 Iustin Pop
        "network_port": instance.network_port,
5953 24838135 Iustin Pop
        "hv_instance": instance.hvparams,
5954 338e51e8 Iustin Pop
        "hv_actual": cluster.FillHV(instance),
5955 338e51e8 Iustin Pop
        "be_instance": instance.beparams,
5956 338e51e8 Iustin Pop
        "be_actual": cluster.FillBE(instance),
5957 a8083063 Iustin Pop
        }
5958 a8083063 Iustin Pop
5959 a8083063 Iustin Pop
      result[instance.name] = idict
5960 a8083063 Iustin Pop
5961 a8083063 Iustin Pop
    return result
5962 a8083063 Iustin Pop
5963 a8083063 Iustin Pop
5964 7767bbf5 Manuel Franceschini
class LUSetInstanceParams(LogicalUnit):
5965 a8083063 Iustin Pop
  """Modifies an instances's parameters.
5966 a8083063 Iustin Pop

5967 a8083063 Iustin Pop
  """
5968 a8083063 Iustin Pop
  HPATH = "instance-modify"
5969 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
5970 24991749 Iustin Pop
  _OP_REQP = ["instance_name"]
5971 1a5c7281 Guido Trotter
  REQ_BGL = False
5972 1a5c7281 Guido Trotter
5973 24991749 Iustin Pop
  def CheckArguments(self):
5974 24991749 Iustin Pop
    if not hasattr(self.op, 'nics'):
5975 24991749 Iustin Pop
      self.op.nics = []
5976 24991749 Iustin Pop
    if not hasattr(self.op, 'disks'):
5977 24991749 Iustin Pop
      self.op.disks = []
5978 24991749 Iustin Pop
    if not hasattr(self.op, 'beparams'):
5979 24991749 Iustin Pop
      self.op.beparams = {}
5980 24991749 Iustin Pop
    if not hasattr(self.op, 'hvparams'):
5981 24991749 Iustin Pop
      self.op.hvparams = {}
5982 24991749 Iustin Pop
    self.op.force = getattr(self.op, "force", False)
5983 24991749 Iustin Pop
    if not (self.op.nics or self.op.disks or
5984 24991749 Iustin Pop
            self.op.hvparams or self.op.beparams):
5985 24991749 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
5986 24991749 Iustin Pop
5987 24991749 Iustin Pop
    # Disk validation
5988 24991749 Iustin Pop
    disk_addremove = 0
5989 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
5990 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
5991 24991749 Iustin Pop
        disk_addremove += 1
5992 24991749 Iustin Pop
        continue
5993 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
5994 24991749 Iustin Pop
        disk_addremove += 1
5995 24991749 Iustin Pop
      else:
5996 24991749 Iustin Pop
        if not isinstance(disk_op, int):
5997 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index")
5998 24991749 Iustin Pop
      if disk_op == constants.DDM_ADD:
5999 24991749 Iustin Pop
        mode = disk_dict.setdefault('mode', constants.DISK_RDWR)
6000 6ec66eae Iustin Pop
        if mode not in constants.DISK_ACCESS_SET:
6001 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode)
6002 24991749 Iustin Pop
        size = disk_dict.get('size', None)
6003 24991749 Iustin Pop
        if size is None:
6004 24991749 Iustin Pop
          raise errors.OpPrereqError("Required disk parameter size missing")
6005 24991749 Iustin Pop
        try:
6006 24991749 Iustin Pop
          size = int(size)
6007 24991749 Iustin Pop
        except ValueError, err:
6008 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk size parameter: %s" %
6009 24991749 Iustin Pop
                                     str(err))
6010 24991749 Iustin Pop
        disk_dict['size'] = size
6011 24991749 Iustin Pop
      else:
6012 24991749 Iustin Pop
        # modification of disk
6013 24991749 Iustin Pop
        if 'size' in disk_dict:
6014 24991749 Iustin Pop
          raise errors.OpPrereqError("Disk size change not possible, use"
6015 24991749 Iustin Pop
                                     " grow-disk")
6016 24991749 Iustin Pop
6017 24991749 Iustin Pop
    if disk_addremove > 1:
6018 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one disk add or remove operation"
6019 24991749 Iustin Pop
                                 " supported at a time")
6020 24991749 Iustin Pop
6021 24991749 Iustin Pop
    # NIC validation
6022 24991749 Iustin Pop
    nic_addremove = 0
6023 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6024 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6025 24991749 Iustin Pop
        nic_addremove += 1
6026 24991749 Iustin Pop
        continue
6027 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6028 24991749 Iustin Pop
        nic_addremove += 1
6029 24991749 Iustin Pop
      else:
6030 24991749 Iustin Pop
        if not isinstance(nic_op, int):
6031 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid nic index")
6032 24991749 Iustin Pop
6033 24991749 Iustin Pop
      # nic_dict should be a dict
6034 24991749 Iustin Pop
      nic_ip = nic_dict.get('ip', None)
6035 24991749 Iustin Pop
      if nic_ip is not None:
6036 5c44da6a Guido Trotter
        if nic_ip.lower() == constants.VALUE_NONE:
6037 24991749 Iustin Pop
          nic_dict['ip'] = None
6038 24991749 Iustin Pop
        else:
6039 24991749 Iustin Pop
          if not utils.IsValidIP(nic_ip):
6040 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip)
6041 5c44da6a Guido Trotter
6042 5c44da6a Guido Trotter
      if nic_op == constants.DDM_ADD:
6043 5c44da6a Guido Trotter
        nic_bridge = nic_dict.get('bridge', None)
6044 5c44da6a Guido Trotter
        if nic_bridge is None:
6045 5c44da6a Guido Trotter
          nic_dict['bridge'] = self.cfg.GetDefBridge()
6046 5c44da6a Guido Trotter
        nic_mac = nic_dict.get('mac', None)
6047 5c44da6a Guido Trotter
        if nic_mac is None:
6048 5c44da6a Guido Trotter
          nic_dict['mac'] = constants.VALUE_AUTO
6049 5c44da6a Guido Trotter
6050 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6051 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6052 24991749 Iustin Pop
        if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6053 24991749 Iustin Pop
          if not utils.IsValidMac(nic_mac):
6054 24991749 Iustin Pop
            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac)
6055 5c44da6a Guido Trotter
        if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
6056 5c44da6a Guido Trotter
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
6057 5c44da6a Guido Trotter
                                     " modifying an existing nic")
6058 5c44da6a Guido Trotter
6059 24991749 Iustin Pop
    if nic_addremove > 1:
6060 24991749 Iustin Pop
      raise errors.OpPrereqError("Only one NIC add or remove operation"
6061 24991749 Iustin Pop
                                 " supported at a time")
6062 24991749 Iustin Pop
6063 1a5c7281 Guido Trotter
  def ExpandNames(self):
6064 1a5c7281 Guido Trotter
    self._ExpandAndLockInstance()
6065 74409b12 Iustin Pop
    self.needed_locks[locking.LEVEL_NODE] = []
6066 74409b12 Iustin Pop
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
6067 74409b12 Iustin Pop
6068 74409b12 Iustin Pop
  def DeclareLocks(self, level):
6069 74409b12 Iustin Pop
    if level == locking.LEVEL_NODE:
6070 74409b12 Iustin Pop
      self._LockInstancesNodes()
6071 a8083063 Iustin Pop
6072 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6073 a8083063 Iustin Pop
    """Build hooks env.
6074 a8083063 Iustin Pop

6075 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
6076 a8083063 Iustin Pop

6077 a8083063 Iustin Pop
    """
6078 396e1b78 Michael Hanselmann
    args = dict()
6079 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.be_new:
6080 338e51e8 Iustin Pop
      args['memory'] = self.be_new[constants.BE_MEMORY]
6081 338e51e8 Iustin Pop
    if constants.BE_VCPUS in self.be_new:
6082 61be6ba4 Iustin Pop
      args['vcpus'] = self.be_new[constants.BE_VCPUS]
6083 d8dcf3c9 Guido Trotter
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
6084 d8dcf3c9 Guido Trotter
    # information at all.
6085 d8dcf3c9 Guido Trotter
    if self.op.nics:
6086 d8dcf3c9 Guido Trotter
      args['nics'] = []
6087 d8dcf3c9 Guido Trotter
      nic_override = dict(self.op.nics)
6088 d8dcf3c9 Guido Trotter
      for idx, nic in enumerate(self.instance.nics):
6089 d8dcf3c9 Guido Trotter
        if idx in nic_override:
6090 d8dcf3c9 Guido Trotter
          this_nic_override = nic_override[idx]
6091 d8dcf3c9 Guido Trotter
        else:
6092 d8dcf3c9 Guido Trotter
          this_nic_override = {}
6093 d8dcf3c9 Guido Trotter
        if 'ip' in this_nic_override:
6094 d8dcf3c9 Guido Trotter
          ip = this_nic_override['ip']
6095 d8dcf3c9 Guido Trotter
        else:
6096 d8dcf3c9 Guido Trotter
          ip = nic.ip
6097 d8dcf3c9 Guido Trotter
        if 'bridge' in this_nic_override:
6098 d8dcf3c9 Guido Trotter
          bridge = this_nic_override['bridge']
6099 d8dcf3c9 Guido Trotter
        else:
6100 d8dcf3c9 Guido Trotter
          bridge = nic.bridge
6101 d8dcf3c9 Guido Trotter
        if 'mac' in this_nic_override:
6102 d8dcf3c9 Guido Trotter
          mac = this_nic_override['mac']
6103 d8dcf3c9 Guido Trotter
        else:
6104 d8dcf3c9 Guido Trotter
          mac = nic.mac
6105 d8dcf3c9 Guido Trotter
        args['nics'].append((ip, bridge, mac))
6106 d8dcf3c9 Guido Trotter
      if constants.DDM_ADD in nic_override:
6107 d8dcf3c9 Guido Trotter
        ip = nic_override[constants.DDM_ADD].get('ip', None)
6108 d8dcf3c9 Guido Trotter
        bridge = nic_override[constants.DDM_ADD]['bridge']
6109 d8dcf3c9 Guido Trotter
        mac = nic_override[constants.DDM_ADD]['mac']
6110 d8dcf3c9 Guido Trotter
        args['nics'].append((ip, bridge, mac))
6111 d8dcf3c9 Guido Trotter
      elif constants.DDM_REMOVE in nic_override:
6112 d8dcf3c9 Guido Trotter
        del args['nics'][-1]
6113 d8dcf3c9 Guido Trotter
6114 338e51e8 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
6115 6b12959c Iustin Pop
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
6116 a8083063 Iustin Pop
    return env, nl, nl
6117 a8083063 Iustin Pop
6118 a8083063 Iustin Pop
  def CheckPrereq(self):
6119 a8083063 Iustin Pop
    """Check prerequisites.
6120 a8083063 Iustin Pop

6121 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
6122 a8083063 Iustin Pop

6123 a8083063 Iustin Pop
    """
6124 7c4d6c7b Michael Hanselmann
    self.force = self.op.force
6125 a8083063 Iustin Pop
6126 74409b12 Iustin Pop
    # checking the new params on the primary/secondary nodes
6127 31a853d2 Iustin Pop
6128 cfefe007 Guido Trotter
    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
6129 1a5c7281 Guido Trotter
    assert self.instance is not None, \
6130 1a5c7281 Guido Trotter
      "Cannot retrieve locked instance %s" % self.op.instance_name
6131 6b12959c Iustin Pop
    pnode = instance.primary_node
6132 6b12959c Iustin Pop
    nodelist = list(instance.all_nodes)
6133 74409b12 Iustin Pop
6134 338e51e8 Iustin Pop
    # hvparams processing
6135 74409b12 Iustin Pop
    if self.op.hvparams:
6136 74409b12 Iustin Pop
      i_hvdict = copy.deepcopy(instance.hvparams)
6137 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
6138 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
6139 74409b12 Iustin Pop
          try:
6140 74409b12 Iustin Pop
            del i_hvdict[key]
6141 74409b12 Iustin Pop
          except KeyError:
6142 74409b12 Iustin Pop
            pass
6143 74409b12 Iustin Pop
        else:
6144 74409b12 Iustin Pop
          i_hvdict[key] = val
6145 74409b12 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
6146 a5728081 Guido Trotter
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
6147 74409b12 Iustin Pop
      hv_new = cluster.FillDict(cluster.hvparams[instance.hypervisor],
6148 74409b12 Iustin Pop
                                i_hvdict)
6149 74409b12 Iustin Pop
      # local check
6150 74409b12 Iustin Pop
      hypervisor.GetHypervisor(
6151 74409b12 Iustin Pop
        instance.hypervisor).CheckParameterSyntax(hv_new)
6152 74409b12 Iustin Pop
      _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
6153 338e51e8 Iustin Pop
      self.hv_new = hv_new # the new actual values
6154 338e51e8 Iustin Pop
      self.hv_inst = i_hvdict # the new dict (without defaults)
6155 338e51e8 Iustin Pop
    else:
6156 338e51e8 Iustin Pop
      self.hv_new = self.hv_inst = {}
6157 338e51e8 Iustin Pop
6158 338e51e8 Iustin Pop
    # beparams processing
6159 338e51e8 Iustin Pop
    if self.op.beparams:
6160 338e51e8 Iustin Pop
      i_bedict = copy.deepcopy(instance.beparams)
6161 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
6162 8edcd611 Guido Trotter
        if val == constants.VALUE_DEFAULT:
6163 338e51e8 Iustin Pop
          try:
6164 338e51e8 Iustin Pop
            del i_bedict[key]
6165 338e51e8 Iustin Pop
          except KeyError:
6166 338e51e8 Iustin Pop
            pass
6167 338e51e8 Iustin Pop
        else:
6168 338e51e8 Iustin Pop
          i_bedict[key] = val
6169 338e51e8 Iustin Pop
      cluster = self.cfg.GetClusterInfo()
6170 a5728081 Guido Trotter
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
6171 338e51e8 Iustin Pop
      be_new = cluster.FillDict(cluster.beparams[constants.BEGR_DEFAULT],
6172 338e51e8 Iustin Pop
                                i_bedict)
6173 338e51e8 Iustin Pop
      self.be_new = be_new # the new actual values
6174 338e51e8 Iustin Pop
      self.be_inst = i_bedict # the new dict (without defaults)
6175 338e51e8 Iustin Pop
    else:
6176 b637ae4d Iustin Pop
      self.be_new = self.be_inst = {}
6177 74409b12 Iustin Pop
6178 cfefe007 Guido Trotter
    self.warn = []
6179 647a5d80 Iustin Pop
6180 338e51e8 Iustin Pop
    if constants.BE_MEMORY in self.op.beparams and not self.force:
6181 647a5d80 Iustin Pop
      mem_check_list = [pnode]
6182 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6183 c0f2b229 Iustin Pop
        # either we changed auto_balance to yes or it was from before
6184 647a5d80 Iustin Pop
        mem_check_list.extend(instance.secondary_nodes)
6185 72737a7f Iustin Pop
      instance_info = self.rpc.call_instance_info(pnode, instance.name,
6186 72737a7f Iustin Pop
                                                  instance.hypervisor)
6187 647a5d80 Iustin Pop
      nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
6188 72737a7f Iustin Pop
                                         instance.hypervisor)
6189 781de953 Iustin Pop
      if nodeinfo[pnode].failed or not isinstance(nodeinfo[pnode].data, dict):
6190 cfefe007 Guido Trotter
        # Assume the primary node is unreachable and go ahead
6191 cfefe007 Guido Trotter
        self.warn.append("Can't get info from primary node %s" % pnode)
6192 cfefe007 Guido Trotter
      else:
6193 781de953 Iustin Pop
        if not instance_info.failed and instance_info.data:
6194 ade0e8cd Guido Trotter
          current_mem = int(instance_info.data['memory'])
6195 cfefe007 Guido Trotter
        else:
6196 cfefe007 Guido Trotter
          # Assume instance not running
6197 cfefe007 Guido Trotter
          # (there is a slight race condition here, but it's not very probable,
6198 cfefe007 Guido Trotter
          # and we have no other way to check)
6199 cfefe007 Guido Trotter
          current_mem = 0
6200 338e51e8 Iustin Pop
        miss_mem = (be_new[constants.BE_MEMORY] - current_mem -
6201 781de953 Iustin Pop
                    nodeinfo[pnode].data['memory_free'])
6202 cfefe007 Guido Trotter
        if miss_mem > 0:
6203 cfefe007 Guido Trotter
          raise errors.OpPrereqError("This change will prevent the instance"
6204 cfefe007 Guido Trotter
                                     " from starting, due to %d MB of memory"
6205 cfefe007 Guido Trotter
                                     " missing on its primary node" % miss_mem)
6206 cfefe007 Guido Trotter
6207 c0f2b229 Iustin Pop
      if be_new[constants.BE_AUTO_BALANCE]:
6208 ea33068f Iustin Pop
        for node, nres in nodeinfo.iteritems():
6209 ea33068f Iustin Pop
          if node not in instance.secondary_nodes:
6210 ea33068f Iustin Pop
            continue
6211 781de953 Iustin Pop
          if nres.failed or not isinstance(nres.data, dict):
6212 647a5d80 Iustin Pop
            self.warn.append("Can't get info from secondary node %s" % node)
6213 781de953 Iustin Pop
          elif be_new[constants.BE_MEMORY] > nres.data['memory_free']:
6214 647a5d80 Iustin Pop
            self.warn.append("Not enough memory to failover instance to"
6215 647a5d80 Iustin Pop
                             " secondary node %s" % node)
6216 5bc84f33 Alexander Schreiber
6217 24991749 Iustin Pop
    # NIC processing
6218 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6219 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6220 24991749 Iustin Pop
        if not instance.nics:
6221 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance has no NICs, cannot remove")
6222 24991749 Iustin Pop
        continue
6223 24991749 Iustin Pop
      if nic_op != constants.DDM_ADD:
6224 24991749 Iustin Pop
        # an existing nic
6225 24991749 Iustin Pop
        if nic_op < 0 or nic_op >= len(instance.nics):
6226 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid NIC index %s, valid values"
6227 24991749 Iustin Pop
                                     " are 0 to %d" %
6228 24991749 Iustin Pop
                                     (nic_op, len(instance.nics)))
6229 5c44da6a Guido Trotter
      if 'bridge' in nic_dict:
6230 5c44da6a Guido Trotter
        nic_bridge = nic_dict['bridge']
6231 5c44da6a Guido Trotter
        if nic_bridge is None:
6232 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic bridge to None')
6233 24991749 Iustin Pop
        if not self.rpc.call_bridges_exist(pnode, [nic_bridge]):
6234 24991749 Iustin Pop
          msg = ("Bridge '%s' doesn't exist on one of"
6235 24991749 Iustin Pop
                 " the instance nodes" % nic_bridge)
6236 24991749 Iustin Pop
          if self.force:
6237 24991749 Iustin Pop
            self.warn.append(msg)
6238 24991749 Iustin Pop
          else:
6239 24991749 Iustin Pop
            raise errors.OpPrereqError(msg)
6240 5c44da6a Guido Trotter
      if 'mac' in nic_dict:
6241 5c44da6a Guido Trotter
        nic_mac = nic_dict['mac']
6242 5c44da6a Guido Trotter
        if nic_mac is None:
6243 5c44da6a Guido Trotter
          raise errors.OpPrereqError('Cannot set the nic mac to None')
6244 5c44da6a Guido Trotter
        elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
6245 5c44da6a Guido Trotter
          # otherwise generate the mac
6246 5c44da6a Guido Trotter
          nic_dict['mac'] = self.cfg.GenerateMAC()
6247 5c44da6a Guido Trotter
        else:
6248 5c44da6a Guido Trotter
          # or validate/reserve the current one
6249 5c44da6a Guido Trotter
          if self.cfg.IsMacInUse(nic_mac):
6250 5c44da6a Guido Trotter
            raise errors.OpPrereqError("MAC address %s already in use"
6251 5c44da6a Guido Trotter
                                       " in cluster" % nic_mac)
6252 24991749 Iustin Pop
6253 24991749 Iustin Pop
    # DISK processing
6254 24991749 Iustin Pop
    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
6255 24991749 Iustin Pop
      raise errors.OpPrereqError("Disk operations not supported for"
6256 24991749 Iustin Pop
                                 " diskless instances")
6257 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6258 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6259 24991749 Iustin Pop
        if len(instance.disks) == 1:
6260 24991749 Iustin Pop
          raise errors.OpPrereqError("Cannot remove the last disk of"
6261 24991749 Iustin Pop
                                     " an instance")
6262 24991749 Iustin Pop
        ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
6263 24991749 Iustin Pop
        ins_l = ins_l[pnode]
6264 4cfb9426 Iustin Pop
        if ins_l.failed or not isinstance(ins_l.data, list):
6265 24991749 Iustin Pop
          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
6266 4cfb9426 Iustin Pop
        if instance.name in ins_l.data:
6267 24991749 Iustin Pop
          raise errors.OpPrereqError("Instance is running, can't remove"
6268 24991749 Iustin Pop
                                     " disks.")
6269 24991749 Iustin Pop
6270 24991749 Iustin Pop
      if (disk_op == constants.DDM_ADD and
6271 24991749 Iustin Pop
          len(instance.nics) >= constants.MAX_DISKS):
6272 24991749 Iustin Pop
        raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
6273 24991749 Iustin Pop
                                   " add more" % constants.MAX_DISKS)
6274 24991749 Iustin Pop
      if disk_op not in (constants.DDM_ADD, constants.DDM_REMOVE):
6275 24991749 Iustin Pop
        # an existing disk
6276 24991749 Iustin Pop
        if disk_op < 0 or disk_op >= len(instance.disks):
6277 24991749 Iustin Pop
          raise errors.OpPrereqError("Invalid disk index %s, valid values"
6278 24991749 Iustin Pop
                                     " are 0 to %d" %
6279 24991749 Iustin Pop
                                     (disk_op, len(instance.disks)))
6280 24991749 Iustin Pop
6281 a8083063 Iustin Pop
    return
6282 a8083063 Iustin Pop
6283 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6284 a8083063 Iustin Pop
    """Modifies an instance.
6285 a8083063 Iustin Pop

6286 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
6287 24991749 Iustin Pop

6288 a8083063 Iustin Pop
    """
6289 cfefe007 Guido Trotter
    # Process here the warnings from CheckPrereq, as we don't have a
6290 cfefe007 Guido Trotter
    # feedback_fn there.
6291 cfefe007 Guido Trotter
    for warn in self.warn:
6292 cfefe007 Guido Trotter
      feedback_fn("WARNING: %s" % warn)
6293 cfefe007 Guido Trotter
6294 a8083063 Iustin Pop
    result = []
6295 a8083063 Iustin Pop
    instance = self.instance
6296 24991749 Iustin Pop
    # disk changes
6297 24991749 Iustin Pop
    for disk_op, disk_dict in self.op.disks:
6298 24991749 Iustin Pop
      if disk_op == constants.DDM_REMOVE:
6299 24991749 Iustin Pop
        # remove the last disk
6300 24991749 Iustin Pop
        device = instance.disks.pop()
6301 24991749 Iustin Pop
        device_idx = len(instance.disks)
6302 24991749 Iustin Pop
        for node, disk in device.ComputeNodeTree(instance.primary_node):
6303 24991749 Iustin Pop
          self.cfg.SetDiskID(disk, node)
6304 e1bc0878 Iustin Pop
          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
6305 e1bc0878 Iustin Pop
          if msg:
6306 e1bc0878 Iustin Pop
            self.LogWarning("Could not remove disk/%d on node %s: %s,"
6307 e1bc0878 Iustin Pop
                            " continuing anyway", device_idx, node, msg)
6308 24991749 Iustin Pop
        result.append(("disk/%d" % device_idx, "remove"))
6309 24991749 Iustin Pop
      elif disk_op == constants.DDM_ADD:
6310 24991749 Iustin Pop
        # add a new disk
6311 24991749 Iustin Pop
        if instance.disk_template == constants.DT_FILE:
6312 24991749 Iustin Pop
          file_driver, file_path = instance.disks[0].logical_id
6313 24991749 Iustin Pop
          file_path = os.path.dirname(file_path)
6314 24991749 Iustin Pop
        else:
6315 24991749 Iustin Pop
          file_driver = file_path = None
6316 24991749 Iustin Pop
        disk_idx_base = len(instance.disks)
6317 24991749 Iustin Pop
        new_disk = _GenerateDiskTemplate(self,
6318 24991749 Iustin Pop
                                         instance.disk_template,
6319 32388e6d Iustin Pop
                                         instance.name, instance.primary_node,
6320 24991749 Iustin Pop
                                         instance.secondary_nodes,
6321 24991749 Iustin Pop
                                         [disk_dict],
6322 24991749 Iustin Pop
                                         file_path,
6323 24991749 Iustin Pop
                                         file_driver,
6324 24991749 Iustin Pop
                                         disk_idx_base)[0]
6325 24991749 Iustin Pop
        instance.disks.append(new_disk)
6326 24991749 Iustin Pop
        info = _GetInstanceInfoText(instance)
6327 24991749 Iustin Pop
6328 24991749 Iustin Pop
        logging.info("Creating volume %s for instance %s",
6329 24991749 Iustin Pop
                     new_disk.iv_name, instance.name)
6330 24991749 Iustin Pop
        # Note: this needs to be kept in sync with _CreateDisks
6331 24991749 Iustin Pop
        #HARDCODE
6332 428958aa Iustin Pop
        for node in instance.all_nodes:
6333 428958aa Iustin Pop
          f_create = node == instance.primary_node
6334 796cab27 Iustin Pop
          try:
6335 428958aa Iustin Pop
            _CreateBlockDev(self, node, instance, new_disk,
6336 428958aa Iustin Pop
                            f_create, info, f_create)
6337 1492cca7 Iustin Pop
          except errors.OpExecError, err:
6338 24991749 Iustin Pop
            self.LogWarning("Failed to create volume %s (%s) on"
6339 428958aa Iustin Pop
                            " node %s: %s",
6340 428958aa Iustin Pop
                            new_disk.iv_name, new_disk, node, err)
6341 24991749 Iustin Pop
        result.append(("disk/%d" % disk_idx_base, "add:size=%s,mode=%s" %
6342 24991749 Iustin Pop
                       (new_disk.size, new_disk.mode)))
6343 24991749 Iustin Pop
      else:
6344 24991749 Iustin Pop
        # change a given disk
6345 24991749 Iustin Pop
        instance.disks[disk_op].mode = disk_dict['mode']
6346 24991749 Iustin Pop
        result.append(("disk.mode/%d" % disk_op, disk_dict['mode']))
6347 24991749 Iustin Pop
    # NIC changes
6348 24991749 Iustin Pop
    for nic_op, nic_dict in self.op.nics:
6349 24991749 Iustin Pop
      if nic_op == constants.DDM_REMOVE:
6350 24991749 Iustin Pop
        # remove the last nic
6351 24991749 Iustin Pop
        del instance.nics[-1]
6352 24991749 Iustin Pop
        result.append(("nic.%d" % len(instance.nics), "remove"))
6353 24991749 Iustin Pop
      elif nic_op == constants.DDM_ADD:
6354 5c44da6a Guido Trotter
        # mac and bridge should be set, by now
6355 5c44da6a Guido Trotter
        mac = nic_dict['mac']
6356 5c44da6a Guido Trotter
        bridge = nic_dict['bridge']
6357 24991749 Iustin Pop
        new_nic = objects.NIC(mac=mac, ip=nic_dict.get('ip', None),
6358 5c44da6a Guido Trotter
                              bridge=bridge)
6359 24991749 Iustin Pop
        instance.nics.append(new_nic)
6360 24991749 Iustin Pop
        result.append(("nic.%d" % (len(instance.nics) - 1),
6361 24991749 Iustin Pop
                       "add:mac=%s,ip=%s,bridge=%s" %
6362 24991749 Iustin Pop
                       (new_nic.mac, new_nic.ip, new_nic.bridge)))
6363 24991749 Iustin Pop
      else:
6364 24991749 Iustin Pop
        # change a given nic
6365 24991749 Iustin Pop
        for key in 'mac', 'ip', 'bridge':
6366 24991749 Iustin Pop
          if key in nic_dict:
6367 24991749 Iustin Pop
            setattr(instance.nics[nic_op], key, nic_dict[key])
6368 24991749 Iustin Pop
            result.append(("nic.%s/%d" % (key, nic_op), nic_dict[key]))
6369 24991749 Iustin Pop
6370 24991749 Iustin Pop
    # hvparams changes
6371 74409b12 Iustin Pop
    if self.op.hvparams:
6372 12649e35 Guido Trotter
      instance.hvparams = self.hv_inst
6373 74409b12 Iustin Pop
      for key, val in self.op.hvparams.iteritems():
6374 74409b12 Iustin Pop
        result.append(("hv/%s" % key, val))
6375 24991749 Iustin Pop
6376 24991749 Iustin Pop
    # beparams changes
6377 338e51e8 Iustin Pop
    if self.op.beparams:
6378 338e51e8 Iustin Pop
      instance.beparams = self.be_inst
6379 338e51e8 Iustin Pop
      for key, val in self.op.beparams.iteritems():
6380 338e51e8 Iustin Pop
        result.append(("be/%s" % key, val))
6381 a8083063 Iustin Pop
6382 ea94e1cd Guido Trotter
    self.cfg.Update(instance)
6383 a8083063 Iustin Pop
6384 a8083063 Iustin Pop
    return result
6385 a8083063 Iustin Pop
6386 a8083063 Iustin Pop
6387 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
6388 a8083063 Iustin Pop
  """Query the exports list
6389 a8083063 Iustin Pop

6390 a8083063 Iustin Pop
  """
6391 895ecd9c Guido Trotter
  _OP_REQP = ['nodes']
6392 21a15682 Guido Trotter
  REQ_BGL = False
6393 21a15682 Guido Trotter
6394 21a15682 Guido Trotter
  def ExpandNames(self):
6395 21a15682 Guido Trotter
    self.needed_locks = {}
6396 21a15682 Guido Trotter
    self.share_locks[locking.LEVEL_NODE] = 1
6397 21a15682 Guido Trotter
    if not self.op.nodes:
6398 e310b019 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6399 21a15682 Guido Trotter
    else:
6400 21a15682 Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = \
6401 21a15682 Guido Trotter
        _GetWantedNodes(self, self.op.nodes)
6402 a8083063 Iustin Pop
6403 a8083063 Iustin Pop
  def CheckPrereq(self):
6404 21a15682 Guido Trotter
    """Check prerequisites.
6405 a8083063 Iustin Pop

6406 a8083063 Iustin Pop
    """
6407 21a15682 Guido Trotter
    self.nodes = self.acquired_locks[locking.LEVEL_NODE]
6408 a8083063 Iustin Pop
6409 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6410 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
6411 a8083063 Iustin Pop

6412 e4376078 Iustin Pop
    @rtype: dict
6413 e4376078 Iustin Pop
    @return: a dictionary with the structure node->(export-list)
6414 e4376078 Iustin Pop
        where export-list is a list of the instances exported on
6415 e4376078 Iustin Pop
        that node.
6416 a8083063 Iustin Pop

6417 a8083063 Iustin Pop
    """
6418 b04285f2 Guido Trotter
    rpcresult = self.rpc.call_export_list(self.nodes)
6419 b04285f2 Guido Trotter
    result = {}
6420 b04285f2 Guido Trotter
    for node in rpcresult:
6421 b04285f2 Guido Trotter
      if rpcresult[node].failed:
6422 b04285f2 Guido Trotter
        result[node] = False
6423 b04285f2 Guido Trotter
      else:
6424 b04285f2 Guido Trotter
        result[node] = rpcresult[node].data
6425 b04285f2 Guido Trotter
6426 b04285f2 Guido Trotter
    return result
6427 a8083063 Iustin Pop
6428 a8083063 Iustin Pop
6429 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
6430 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
6431 a8083063 Iustin Pop

6432 a8083063 Iustin Pop
  """
6433 a8083063 Iustin Pop
  HPATH = "instance-export"
6434 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
6435 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
6436 6657590e Guido Trotter
  REQ_BGL = False
6437 6657590e Guido Trotter
6438 6657590e Guido Trotter
  def ExpandNames(self):
6439 6657590e Guido Trotter
    self._ExpandAndLockInstance()
6440 6657590e Guido Trotter
    # FIXME: lock only instance primary and destination node
6441 6657590e Guido Trotter
    #
6442 6657590e Guido Trotter
    # Sad but true, for now we have do lock all nodes, as we don't know where
6443 6657590e Guido Trotter
    # the previous export might be, and and in this LU we search for it and
6444 6657590e Guido Trotter
    # remove it from its current node. In the future we could fix this by:
6445 6657590e Guido Trotter
    #  - making a tasklet to search (share-lock all), then create the new one,
6446 6657590e Guido Trotter
    #    then one to remove, after
6447 5bbd3f7f Michael Hanselmann
    #  - removing the removal operation altogether
6448 6657590e Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6449 6657590e Guido Trotter
6450 6657590e Guido Trotter
  def DeclareLocks(self, level):
6451 6657590e Guido Trotter
    """Last minute lock declaration."""
6452 6657590e Guido Trotter
    # All nodes are locked anyway, so nothing to do here.
6453 a8083063 Iustin Pop
6454 a8083063 Iustin Pop
  def BuildHooksEnv(self):
6455 a8083063 Iustin Pop
    """Build hooks env.
6456 a8083063 Iustin Pop

6457 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
6458 a8083063 Iustin Pop

6459 a8083063 Iustin Pop
    """
6460 a8083063 Iustin Pop
    env = {
6461 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
6462 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
6463 a8083063 Iustin Pop
      }
6464 338e51e8 Iustin Pop
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
6465 d6a02168 Michael Hanselmann
    nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
6466 a8083063 Iustin Pop
          self.op.target_node]
6467 a8083063 Iustin Pop
    return env, nl, nl
6468 a8083063 Iustin Pop
6469 a8083063 Iustin Pop
  def CheckPrereq(self):
6470 a8083063 Iustin Pop
    """Check prerequisites.
6471 a8083063 Iustin Pop

6472 9ac99fda Guido Trotter
    This checks that the instance and node names are valid.
6473 a8083063 Iustin Pop

6474 a8083063 Iustin Pop
    """
6475 6657590e Guido Trotter
    instance_name = self.op.instance_name
6476 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
6477 6657590e Guido Trotter
    assert self.instance is not None, \
6478 6657590e Guido Trotter
          "Cannot retrieve locked instance %s" % self.op.instance_name
6479 43017d26 Iustin Pop
    _CheckNodeOnline(self, self.instance.primary_node)
6480 a8083063 Iustin Pop
6481 6657590e Guido Trotter
    self.dst_node = self.cfg.GetNodeInfo(
6482 6657590e Guido Trotter
      self.cfg.ExpandNodeName(self.op.target_node))
6483 a8083063 Iustin Pop
6484 268b8e42 Iustin Pop
    if self.dst_node is None:
6485 268b8e42 Iustin Pop
      # This is wrong node name, not a non-locked node
6486 268b8e42 Iustin Pop
      raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node)
6487 aeb83a2b Iustin Pop
    _CheckNodeOnline(self, self.dst_node.name)
6488 733a2b6a Iustin Pop
    _CheckNodeNotDrained(self, self.dst_node.name)
6489 a8083063 Iustin Pop
6490 b6023d6c Manuel Franceschini
    # instance disk type verification
6491 b6023d6c Manuel Franceschini
    for disk in self.instance.disks:
6492 b6023d6c Manuel Franceschini
      if disk.dev_type == constants.LD_FILE:
6493 b6023d6c Manuel Franceschini
        raise errors.OpPrereqError("Export not supported for instances with"
6494 b6023d6c Manuel Franceschini
                                   " file-based disks")
6495 b6023d6c Manuel Franceschini
6496 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
6497 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
6498 a8083063 Iustin Pop

6499 a8083063 Iustin Pop
    """
6500 a8083063 Iustin Pop
    instance = self.instance
6501 a8083063 Iustin Pop
    dst_node = self.dst_node
6502 a8083063 Iustin Pop
    src_node = instance.primary_node
6503 a8083063 Iustin Pop
    if self.op.shutdown:
6504 fb300fb7 Guido Trotter
      # shutdown the instance, but not the disks
6505 781de953 Iustin Pop
      result = self.rpc.call_instance_shutdown(src_node, instance)
6506 1fae010f Iustin Pop
      msg = result.RemoteFailMsg()
6507 1fae010f Iustin Pop
      if msg:
6508 1fae010f Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on"
6509 1fae010f Iustin Pop
                                 " node %s: %s" %
6510 1fae010f Iustin Pop
                                 (instance.name, src_node, msg))
6511 a8083063 Iustin Pop
6512 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
6513 a8083063 Iustin Pop
6514 a8083063 Iustin Pop
    snap_disks = []
6515 a8083063 Iustin Pop
6516 998c712c Iustin Pop
    # set the disks ID correctly since call_instance_start needs the
6517 998c712c Iustin Pop
    # correct drbd minor to create the symlinks
6518 998c712c Iustin Pop
    for disk in instance.disks:
6519 998c712c Iustin Pop
      self.cfg.SetDiskID(disk, src_node)
6520 998c712c Iustin Pop
6521 084f05a5 Iustin Pop
    # per-disk results
6522 084f05a5 Iustin Pop
    dresults = []
6523 a8083063 Iustin Pop
    try:
6524 a97da6b7 Iustin Pop
      for idx, disk in enumerate(instance.disks):
6525 19d7f90a Guido Trotter
        # new_dev_name will be a snapshot of an lvm leaf of the one we passed
6526 19d7f90a Guido Trotter
        new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
6527 781de953 Iustin Pop
        if new_dev_name.failed or not new_dev_name.data:
6528 a97da6b7 Iustin Pop
          self.LogWarning("Could not snapshot disk/%d on node %s",
6529 a97da6b7 Iustin Pop
                          idx, src_node)
6530 19d7f90a Guido Trotter
          snap_disks.append(False)
6531 19d7f90a Guido Trotter
        else:
6532 19d7f90a Guido Trotter
          new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
6533 781de953 Iustin Pop
                                 logical_id=(vgname, new_dev_name.data),
6534 781de953 Iustin Pop
                                 physical_id=(vgname, new_dev_name.data),
6535 19d7f90a Guido Trotter
                                 iv_name=disk.iv_name)
6536 19d7f90a Guido Trotter
          snap_disks.append(new_dev)
6537 a8083063 Iustin Pop
6538 a8083063 Iustin Pop
    finally:
6539 0d68c45d Iustin Pop
      if self.op.shutdown and instance.admin_up:
6540 0eca8e0c Iustin Pop
        result = self.rpc.call_instance_start(src_node, instance, None, None)
6541 dd279568 Iustin Pop
        msg = result.RemoteFailMsg()
6542 dd279568 Iustin Pop
        if msg:
6543 b9bddb6b Iustin Pop
          _ShutdownInstanceDisks(self, instance)
6544 dd279568 Iustin Pop
          raise errors.OpExecError("Could not start instance: %s" % msg)
6545 a8083063 Iustin Pop
6546 a8083063 Iustin Pop
    # TODO: check for size
6547 a8083063 Iustin Pop
6548 62c9ec92 Iustin Pop
    cluster_name = self.cfg.GetClusterName()
6549 74c47259 Iustin Pop
    for idx, dev in enumerate(snap_disks):
6550 19d7f90a Guido Trotter
      if dev:
6551 781de953 Iustin Pop
        result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
6552 781de953 Iustin Pop
                                               instance, cluster_name, idx)
6553 781de953 Iustin Pop
        if result.failed or not result.data:
6554 a97da6b7 Iustin Pop
          self.LogWarning("Could not export disk/%d from node %s to"
6555 a97da6b7 Iustin Pop
                          " node %s", idx, src_node, dst_node.name)
6556 084f05a5 Iustin Pop
          dresults.append(False)
6557 084f05a5 Iustin Pop
        else:
6558 084f05a5 Iustin Pop
          dresults.append(True)
6559 e1bc0878 Iustin Pop
        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
6560 e1bc0878 Iustin Pop
        if msg:
6561 a97da6b7 Iustin Pop
          self.LogWarning("Could not remove snapshot for disk/%d from node"
6562 a97da6b7 Iustin Pop
                          " %s: %s", idx, src_node, msg)
6563 084f05a5 Iustin Pop
      else:
6564 084f05a5 Iustin Pop
        dresults.append(False)
6565 a8083063 Iustin Pop
6566 781de953 Iustin Pop
    result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
6567 084f05a5 Iustin Pop
    fin_resu = True
6568 781de953 Iustin Pop
    if result.failed or not result.data:
6569 19d7f90a Guido Trotter
      self.LogWarning("Could not finalize export for instance %s on node %s",
6570 19d7f90a Guido Trotter
                      instance.name, dst_node.name)
6571 084f05a5 Iustin Pop
      fin_resu = False
6572 a8083063 Iustin Pop
6573 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
6574 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
6575 a8083063 Iustin Pop
6576 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
6577 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
6578 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
6579 a8083063 Iustin Pop
    if nodelist:
6580 72737a7f Iustin Pop
      exportlist = self.rpc.call_export_list(nodelist)
6581 a8083063 Iustin Pop
      for node in exportlist:
6582 781de953 Iustin Pop
        if exportlist[node].failed:
6583 781de953 Iustin Pop
          continue
6584 781de953 Iustin Pop
        if instance.name in exportlist[node].data:
6585 72737a7f Iustin Pop
          if not self.rpc.call_export_remove(node, instance.name):
6586 19d7f90a Guido Trotter
            self.LogWarning("Could not remove older export for instance %s"
6587 19d7f90a Guido Trotter
                            " on node %s", instance.name, node)
6588 084f05a5 Iustin Pop
    return fin_resu, dresults
6589 5c947f38 Iustin Pop
6590 5c947f38 Iustin Pop
6591 9ac99fda Guido Trotter
class LURemoveExport(NoHooksLU):
6592 9ac99fda Guido Trotter
  """Remove exports related to the named instance.
6593 9ac99fda Guido Trotter

6594 9ac99fda Guido Trotter
  """
6595 9ac99fda Guido Trotter
  _OP_REQP = ["instance_name"]
6596 3656b3af Guido Trotter
  REQ_BGL = False
6597 3656b3af Guido Trotter
6598 3656b3af Guido Trotter
  def ExpandNames(self):
6599 3656b3af Guido Trotter
    self.needed_locks = {}
6600 3656b3af Guido Trotter
    # We need all nodes to be locked in order for RemoveExport to work, but we
6601 3656b3af Guido Trotter
    # don't need to lock the instance itself, as nothing will happen to it (and
6602 3656b3af Guido Trotter
    # we can remove exports also for a removed instance)
6603 3656b3af Guido Trotter
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
6604 9ac99fda Guido Trotter
6605 9ac99fda Guido Trotter
  def CheckPrereq(self):
6606 9ac99fda Guido Trotter
    """Check prerequisites.
6607 9ac99fda Guido Trotter
    """
6608 9ac99fda Guido Trotter
    pass
6609 9ac99fda Guido Trotter
6610 9ac99fda Guido Trotter
  def Exec(self, feedback_fn):
6611 9ac99fda Guido Trotter
    """Remove any export.
6612 9ac99fda Guido Trotter

6613 9ac99fda Guido Trotter
    """
6614 9ac99fda Guido Trotter
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
6615 9ac99fda Guido Trotter
    # If the instance was not found we'll try with the name that was passed in.
6616 9ac99fda Guido Trotter
    # This will only work if it was an FQDN, though.
6617 9ac99fda Guido Trotter
    fqdn_warn = False
6618 9ac99fda Guido Trotter
    if not instance_name:
6619 9ac99fda Guido Trotter
      fqdn_warn = True
6620 9ac99fda Guido Trotter
      instance_name = self.op.instance_name
6621 9ac99fda Guido Trotter
6622 72737a7f Iustin Pop
    exportlist = self.rpc.call_export_list(self.acquired_locks[
6623 72737a7f Iustin Pop
      locking.LEVEL_NODE])
6624 9ac99fda Guido Trotter
    found = False
6625 9ac99fda Guido Trotter
    for node in exportlist:
6626 781de953 Iustin Pop
      if exportlist[node].failed:
6627 25361b9a Iustin Pop
        self.LogWarning("Failed to query node %s, continuing" % node)
6628 781de953 Iustin Pop
        continue
6629 781de953 Iustin Pop
      if instance_name in exportlist[node].data:
6630 9ac99fda Guido Trotter
        found = True
6631 781de953 Iustin Pop
        result = self.rpc.call_export_remove(node, instance_name)
6632 781de953 Iustin Pop
        if result.failed or not result.data:
6633 9a4f63d1 Iustin Pop
          logging.error("Could not remove export for instance %s"
6634 9a4f63d1 Iustin Pop
                        " on node %s", instance_name, node)
6635 9ac99fda Guido Trotter
6636 9ac99fda Guido Trotter
    if fqdn_warn and not found:
6637 9ac99fda Guido Trotter
      feedback_fn("Export not found. If trying to remove an export belonging"
6638 9ac99fda Guido Trotter
                  " to a deleted instance please use its Fully Qualified"
6639 9ac99fda Guido Trotter
                  " Domain Name.")
6640 9ac99fda Guido Trotter
6641 9ac99fda Guido Trotter
6642 fe267188 Iustin Pop
class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
6643 5c947f38 Iustin Pop
  """Generic tags LU.
6644 5c947f38 Iustin Pop

6645 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
6646 5c947f38 Iustin Pop

6647 5c947f38 Iustin Pop
  """
6648 5c947f38 Iustin Pop
6649 8646adce Guido Trotter
  def ExpandNames(self):
6650 8646adce Guido Trotter
    self.needed_locks = {}
6651 8646adce Guido Trotter
    if self.op.kind == constants.TAG_NODE:
6652 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
6653 5c947f38 Iustin Pop
      if name is None:
6654 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
6655 3ecf6786 Iustin Pop
                                   (self.op.name,))
6656 5c947f38 Iustin Pop
      self.op.name = name
6657 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_NODE] = name
6658 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
6659 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
6660 5c947f38 Iustin Pop
      if name is None:
6661 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
6662 3ecf6786 Iustin Pop
                                   (self.op.name,))
6663 5c947f38 Iustin Pop
      self.op.name = name
6664 8646adce Guido Trotter
      self.needed_locks[locking.LEVEL_INSTANCE] = name
6665 8646adce Guido Trotter
6666 8646adce Guido Trotter
  def CheckPrereq(self):
6667 8646adce Guido Trotter
    """Check prerequisites.
6668 8646adce Guido Trotter

6669 8646adce Guido Trotter
    """
6670 8646adce Guido Trotter
    if self.op.kind == constants.TAG_CLUSTER:
6671 8646adce Guido Trotter
      self.target = self.cfg.GetClusterInfo()
6672 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_NODE:
6673 8646adce Guido Trotter
      self.target = self.cfg.GetNodeInfo(self.op.name)
6674 8646adce Guido Trotter
    elif self.op.kind == constants.TAG_INSTANCE:
6675 8646adce Guido Trotter
      self.target = self.cfg.GetInstanceInfo(self.op.name)
6676 5c947f38 Iustin Pop
    else:
6677 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
6678 3ecf6786 Iustin Pop
                                 str(self.op.kind))
6679 5c947f38 Iustin Pop
6680 5c947f38 Iustin Pop
6681 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]
  REQ_BGL = False

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was resolved by TagsLU.CheckPrereq
    return [tag for tag in self.target.GetTags()]
6693 5c947f38 Iustin Pop
6694 5c947f38 Iustin Pop
6695 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
6696 73415719 Iustin Pop
  """Searches the tags for a given pattern.
6697 73415719 Iustin Pop

6698 73415719 Iustin Pop
  """
6699 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
6700 8646adce Guido Trotter
  REQ_BGL = False
6701 8646adce Guido Trotter
6702 8646adce Guido Trotter
  def ExpandNames(self):
6703 8646adce Guido Trotter
    self.needed_locks = {}
6704 73415719 Iustin Pop
6705 73415719 Iustin Pop
  def CheckPrereq(self):
6706 73415719 Iustin Pop
    """Check prerequisites.
6707 73415719 Iustin Pop

6708 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
6709 73415719 Iustin Pop

6710 73415719 Iustin Pop
    """
6711 73415719 Iustin Pop
    try:
6712 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
6713 73415719 Iustin Pop
    except re.error, err:
6714 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
6715 73415719 Iustin Pop
                                 (self.op.pattern, err))
6716 73415719 Iustin Pop
6717 73415719 Iustin Pop
  def Exec(self, feedback_fn):
6718 73415719 Iustin Pop
    """Returns the tag list.
6719 73415719 Iustin Pop

6720 73415719 Iustin Pop
    """
6721 73415719 Iustin Pop
    cfg = self.cfg
6722 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
6723 8646adce Guido Trotter
    ilist = cfg.GetAllInstancesInfo().values()
6724 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
6725 8646adce Guido Trotter
    nlist = cfg.GetAllNodesInfo().values()
6726 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
6727 73415719 Iustin Pop
    results = []
6728 73415719 Iustin Pop
    for path, target in tgts:
6729 73415719 Iustin Pop
      for tag in target.GetTags():
6730 73415719 Iustin Pop
        if self.re.search(tag):
6731 73415719 Iustin Pop
          results.append((path, tag))
6732 73415719 Iustin Pop
    return results
6733 73415719 Iustin Pop
6734 73415719 Iustin Pop
6735 f27302fa Iustin Pop
class LUAddTags(TagsLU):
6736 5c947f38 Iustin Pop
  """Sets a tag on a given object.
6737 5c947f38 Iustin Pop

6738 5c947f38 Iustin Pop
  """
6739 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
6740 8646adce Guido Trotter
  REQ_BGL = False
6741 5c947f38 Iustin Pop
6742 5c947f38 Iustin Pop
  def CheckPrereq(self):
6743 5c947f38 Iustin Pop
    """Check prerequisites.
6744 5c947f38 Iustin Pop

6745 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
6746 5c947f38 Iustin Pop

6747 5c947f38 Iustin Pop
    """
6748 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
6749 f27302fa Iustin Pop
    for tag in self.op.tags:
6750 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
6751 5c947f38 Iustin Pop
6752 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
6753 5c947f38 Iustin Pop
    """Sets the tag.
6754 5c947f38 Iustin Pop

6755 5c947f38 Iustin Pop
    """
6756 5c947f38 Iustin Pop
    try:
6757 f27302fa Iustin Pop
      for tag in self.op.tags:
6758 f27302fa Iustin Pop
        self.target.AddTag(tag)
6759 5c947f38 Iustin Pop
    except errors.TagError, err:
6760 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
6761 5c947f38 Iustin Pop
    try:
6762 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
6763 5c947f38 Iustin Pop
    except errors.ConfigurationError:
6764 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
6765 3ecf6786 Iustin Pop
                                " config file and the operation has been"
6766 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
6767 5c947f38 Iustin Pop
6768 5c947f38 Iustin Pop
6769 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]
  REQ_BGL = False

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    # resolve self.target via the base class first
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      # report every requested tag the object does not carry
      missing = sorted("'%s'" % del_tag for del_tag in del_tags - cur_tags)
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(missing)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for del_tag in self.op.tags:
      self.target.RemoveTag(del_tag)
    # persist the modified target; a concurrent config change makes the
    # update fail and the whole operation must be retried by the caller
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
6806 06009e27 Iustin Pop
6807 0eed6e61 Guido Trotter
6808 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and set required locks.

    This expands the node list, if any.

    """
    self.needed_locks = {}
    if self.op.on_nodes:
      # _GetWantedNodes can be used here, but is not always appropriate to use
      # this way in ExpandNames. Check LogicalUnit.ExpandNames docstring for
      # more information.
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)
      self.needed_locks[locking.LEVEL_NODE] = self.op.on_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if not self.op.on_nodes:
      return
    results = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
    if not results:
      raise errors.OpExecError("Complete failure from rpc call")
    for node_name, node_res in results.items():
      node_res.Raise()
      if not node_res.data:
        raise errors.OpExecError("Failure during rpc call to node %s,"
                                 " result: %s" % (node_name, node_res.data))
6853 d61df03e Iustin Pop
6854 d61df03e Iustin Pop
6855 d1c2dd75 Iustin Pop
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # keyword arguments required in IALLOCATOR_MODE_ALLOC mode
  _ALLO_KEYS = [
    "mem_size", "disks", "disk_template",
    "os", "tags", "nics", "vcpus", "hypervisor",
    ]
  # keyword arguments required in IALLOCATOR_MODE_RELOC mode
  _RELO_KEYS = [
    "relocate_from",
    ]

  def __init__(self, lu, mode, name, **kwargs):
    # lu: the calling LogicalUnit; provides cfg and rpc access below
    self.lu = lu
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init all input fields so that pylint is happy
    self.mode = mode
    self.name = name
    self.mem_size = self.disks = self.disk_template = None
    self.os = self.tags = self.nics = self.vcpus = None
    self.hypervisor = None
    self.relocate_from = None
    # computed fields
    self.required_nodes = None
    # init result fields
    self.success = self.info = self.nodes = None
    # the mode selects which keyword arguments are mandatory; anything
    # outside the selected keyset (or missing from it) is a caller bug
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      keyset = self._ALLO_KEYS
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      keyset = self._RELO_KEYS
    else:
      raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                   " IAllocator" % self.mode)
    for key in kwargs:
      if key not in keyset:
        raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                     " IAllocator" % key)
      setattr(self, key, kwargs[key])
    for key in keyset:
      if key not in kwargs:
        raise errors.ProgrammerError("Missing input parameter '%s' to"
                                     " IAllocator" % key)
    # eagerly build in_data/in_text so Run() can be called directly
    self._BuildInputData()

  def _ComputeClusterData(self):
    """Compute the generic allocator input data.

    This is the data that is independent of the actual operation.

    """
    cfg = self.lu.cfg
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cfg.GetClusterName(),
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      # we don't have job IDs
      }
    # NOTE(review): iinfo is reused below as a loop variable, shadowing
    # this list of all instance objects
    iinfo = cfg.GetAllInstancesInfo().values()
    # pair each instance with its filled backend parameters
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]

    # node data
    node_results = {}
    node_list = cfg.GetNodeList()

    # pick the hypervisor whose free resources we ask the nodes about:
    # the requested one for allocations, the instance's own for relocations
    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      hypervisor_name = self.hypervisor
    elif self.mode == constants.IALLOCATOR_MODE_RELOC:
      hypervisor_name = cfg.GetInstanceInfo(self.name).hypervisor

    node_data = self.lu.rpc.call_node_info(node_list, cfg.GetVGName(),
                                           hypervisor_name)
    node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                       cluster_info.enabled_hypervisors)
    for nname, nresult in node_data.items():
      # first fill in static (config-based) values
      ninfo = cfg.GetNodeInfo(nname)
      pnr = {
        "tags": list(ninfo.GetTags()),
        "primary_ip": ninfo.primary_ip,
        "secondary_ip": ninfo.secondary_ip,
        "offline": ninfo.offline,
        "drained": ninfo.drained,
        "master_candidate": ninfo.master_candidate,
        }

      # offline/drained nodes only get static data: their RPC result is
      # neither validated nor used
      if not (ninfo.offline or ninfo.drained):
        nresult.Raise()
        if not isinstance(nresult.data, dict):
          raise errors.OpExecError("Can't get data for node %s" % nname)
        remote_info = nresult.data
        # all six attributes must be present and integer-convertible
        for attr in ['memory_total', 'memory_free', 'memory_dom0',
                     'vg_size', 'vg_free', 'cpu_total']:
          if attr not in remote_info:
            raise errors.OpExecError("Node '%s' didn't return attribute"
                                     " '%s'" % (nname, attr))
          try:
            remote_info[attr] = int(remote_info[attr])
          except ValueError, err:
            raise errors.OpExecError("Node '%s' returned invalid value"
                                     " for '%s': %s" % (nname, attr, err))
        # compute memory used by primary instances
        i_p_mem = i_p_up_mem = 0
        for iinfo, beinfo in i_list:
          if iinfo.primary_node == nname:
            i_p_mem += beinfo[constants.BE_MEMORY]
            if iinfo.name not in node_iinfo[nname].data:
              # instance configured here but not actually running
              i_used_mem = 0
            else:
              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
            # reserve the gap between the configured and the currently
            # used memory, so a later start of the instance still fits
            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
            remote_info['memory_free'] -= max(0, i_mem_diff)

            if iinfo.admin_up:
              i_p_up_mem += beinfo[constants.BE_MEMORY]

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": remote_info['memory_total'],
          "reserved_memory": remote_info['memory_dom0'],
          "free_memory": remote_info['memory_free'],
          "total_disk": remote_info['vg_size'],
          "free_disk": remote_info['vg_free'],
          "total_cpus": remote_info['cpu_total'],
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr.update(pnr_dyn)

      node_results[nname] = pnr
    data["nodes"] = node_results

    # instance data
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = [{"mac": n.mac, "ip": n.ip, "bridge": n.bridge}
                  for n in iinfo.nics]
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_up": iinfo.admin_up,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MEMORY],
        "os": iinfo.os,
        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
        "nics": nic_data,
        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
        "disk_template": iinfo.disk_template,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = _ComputeDiskSize(iinfo.disk_template,
                                                 pir["disks"])
      instance_data[iinfo.name] = pir

    data["instances"] = instance_data

    self.in_data = data

  def _AddNewInstance(self):
    """Add new instance data to allocator structure.

    This in combination with _AllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    data = self.in_data

    disk_space = _ComputeDiskSize(self.disk_template, self.disks)

    # network-mirrored templates need a primary and a secondary node
    if self.disk_template in constants.DTS_NET_MIRROR:
      self.required_nodes = 2
    else:
      self.required_nodes = 1
    request = {
      "type": "allocate",
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.mem_size,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.required_nodes,
      }
    data["request"] = request

  def _AddRelocateInstance(self):
    """Add relocate instance data to allocator structure.

    This in combination with _IAllocatorGetClusterData will create the
    correct structure needed as input for the allocator.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = self.lu.cfg.GetInstanceInfo(self.name)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.name)

    # only network-mirrored instances can be relocated (the secondary
    # node is the one being replaced)
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("Instance has not exactly one secondary node")

    self.required_nodes = 1
    disk_sizes = [{'size': disk.size} for disk in instance.disks]
    disk_space = _ComputeDiskSize(instance.disk_template, disk_sizes)

    request = {
      "type": "relocate",
      "name": self.name,
      "disk_space_total": disk_space,
      "required_nodes": self.required_nodes,
      "relocate_from": self.relocate_from,
      }
    self.in_data["request"] = request

  def _BuildInputData(self):
    """Build input data structures.

    Fills in_data with the cluster picture plus the mode-specific
    request, then serializes it into in_text for the external script.

    """
    self._ComputeClusterData()

    if self.mode == constants.IALLOCATOR_MODE_ALLOC:
      self._AddNewInstance()
    else:
      self._AddRelocateInstance()

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    @param name: the name of the allocator script to run (on the master)
    @param validate: whether to parse/validate the script output into
        out_data via _ValidateResult
    @param call_fn: alternative RPC-like callable, for testing; defaults
        to the master-side iallocator runner

    """
    if call_fn is None:
      call_fn = self.lu.rpc.call_iallocator_runner

    result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
    result.Raise()

    # the runner is expected to return (rcode, stdout, stderr, fail)
    if not isinstance(result.data, (list, tuple)) or len(result.data) != 4:
      raise errors.OpExecError("Invalid result from master iallocator runner")

    rcode, stdout, stderr, fail = result.data

    if rcode == constants.IARUN_NOTFOUND:
      raise errors.OpExecError("Can't find allocator '%s'" % name)
    elif rcode == constants.IARUN_FAILURE:
      raise errors.OpExecError("Instance allocator call failed: %s,"
                               " output: %s" % (fail, stdout+stderr))
    self.out_text = stdout
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # the three mandatory keys become instance attributes of same name
    for key in "success", "info", "nodes":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    if not isinstance(rdict["nodes"], list):
      raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
                               " is not a list")
    self.out_data = rdict
7153 538475ca Iustin Pop
7154 538475ca Iustin Pop
7155 d61df03e Iustin Pop
class LUTestAllocator(NoHooksLU):
  """Run allocator tests.

  This LU runs the allocator tests

  """
  # opcode attributes required regardless of the test direction/mode;
  # mode-specific attributes are checked in CheckPrereq
  _OP_REQP = ["direction", "mode", "name"]
7162 d61df03e Iustin Pop
7163 d61df03e Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the opcode parameters depending on the direction and mode test.

    """
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # a full allocation request must carry the whole (virtual)
      # instance specification
      for required in ("name", "mem_size", "disks", "disk_template",
                       "os", "tags", "nics", "vcpus"):
        if not hasattr(self.op, required):
          raise errors.OpPrereqError("Missing attribute '%s' on opcode input" %
                                     required)
      existing = self.cfg.ExpandInstanceName(self.op.name)
      if existing is not None:
        raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                   existing)
      if not isinstance(self.op.nics, list):
        raise errors.OpPrereqError("Invalid parameter 'nics'")
      for nic in self.op.nics:
        nic_ok = (isinstance(nic, dict) and
                  "mac" in nic and
                  "ip" in nic and
                  "bridge" in nic)
        if not nic_ok:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'nics' parameter")
      if not isinstance(self.op.disks, list):
        raise errors.OpPrereqError("Invalid parameter 'disks'")
      for disk in self.op.disks:
        disk_ok = (isinstance(disk, dict) and
                   "size" in disk and
                   isinstance(disk["size"], int) and
                   "mode" in disk and
                   disk["mode"] in ['r', 'w'])
        if not disk_ok:
          raise errors.OpPrereqError("Invalid contents of the"
                                     " 'disks' parameter")
      # default to the cluster-wide hypervisor when none was given
      if getattr(self.op, "hypervisor", None) is None:
        self.op.hypervisor = self.cfg.GetHypervisorType()
    elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
      if not hasattr(self.op, "name"):
        raise errors.OpPrereqError("Missing attribute 'name' on opcode input")
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Instance '%s' not found for relocation" %
                                   self.op.name)
      # store the canonical name and remember the nodes to relocate from
      self.op.name = expanded
      self.relocate_from = self.cfg.GetInstanceInfo(expanded).secondary_nodes
    else:
      raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                                 self.op.mode)

    if self.op.direction == constants.IALLOCATOR_DIR_OUT:
      # asking for allocator output requires an allocator name
      if getattr(self.op, "allocator", None) is None:
        raise errors.OpPrereqError("Missing allocator name")
    elif self.op.direction != constants.IALLOCATOR_DIR_IN:
      raise errors.OpPrereqError("Wrong allocator test '%s'" %
                                 self.op.direction)
7219 d61df03e Iustin Pop
7220 d61df03e Iustin Pop
  def Exec(self, feedback_fn):
    """Run the allocator test.

    Builds an IAllocator request from the opcode and either returns the
    generated request text (IN direction) or the raw allocator output
    (OUT direction).

    """
    kwargs = {
      "mode": self.op.mode,
      "name": self.op.name,
      }
    if self.op.mode == constants.IALLOCATOR_MODE_ALLOC:
      # allocation requests need the full virtual instance specification
      kwargs.update(mem_size=self.op.mem_size,
                    disks=self.op.disks,
                    disk_template=self.op.disk_template,
                    os=self.op.os,
                    tags=self.op.tags,
                    nics=self.op.nics,
                    vcpus=self.op.vcpus,
                    hypervisor=self.op.hypervisor)
    else:
      # relocation requests only carry the current secondary nodes
      kwargs.update(relocate_from=list(self.relocate_from))
    ial = IAllocator(self, **kwargs)

    if self.op.direction == constants.IALLOCATOR_DIR_IN:
      return ial.in_text
    ial.Run(self.op.allocator, validate=False)
    return ial.out_text